diff --git a/channels.json b/channels.json
index 1ba78edd..f9b67f5b 100644
--- a/channels.json
+++ b/channels.json
@@ -15,6 +15,7 @@
     "cinemalibero": "https://cinemalibero.blog",
     "cinetecadibologna": "http://cinestore.cinetecadibologna.it",
     "discoveryplus": "https://www.discoveryplus.com",
+    "tapmovie": "https://it.tapmovie.net",
     "dreamsub": "https://dreamsub.stream",
     "dsda": "https://www.dsda.press",
     "eurostreaming": "https://eurostreaming.click",
diff --git a/channels/tapmovie.json b/channels/tapmovie.json
new file mode 100644
index 00000000..c54164d4
--- /dev/null
+++ b/channels/tapmovie.json
@@ -0,0 +1,11 @@
+{
+    "id": "tapmovie",
+    "name": "Tap Movie",
+    "language": ["ita", "sub-ita"],
+    "active": true,
+    "thumbnail": "",
+    "banner": "",
+    "categories": ["movie", "tvshow", "anime"],
+    "not_active": ["include_in_newest"],
+    "settings": []
+}
diff --git a/channels/tapmovie.py b/channels/tapmovie.py
new file mode 100644
index 00000000..7c5863d1
--- /dev/null
+++ b/channels/tapmovie.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+# ------------------------------------------------------------
+# Channel for 'tapmovie'
+
+from core import support, httptools
+from core.item import Item
+
+host = support.config.get_channel_url()
+api_url = '/api/v2/'
+
+
+@support.menu
+def mainlist(item):
+    film = ['/browse/movie']
+    tvshow = ['/browse/tvshow']
+    search = ''
+
+    # [Menu entry, ['url', 'action', 'args', contentType]]
+    # top = [('Generi', ['', 'genres', '', 'undefined'])]
+
+    return locals()
+
+
+def peliculas(item):
+    itemlist = []
+    movie = item.contentType == 'movie'
+    key = 'latest_updated' if movie else 'latest_tvshows'
+    action = 'findvideos' if movie else 'episodios'
+    json = httptools.downloadpage(host + api_url + 'home', post={}).json[key]
+    for i in json:
+        itemlist.append(item.clone(id=i.get('id'), title=i.get('title'), contentTitle=i.get('title'), contentSerieName='' if movie else i.get('title'),
+                                   contentPlot=i.get('description'), thumbnail=i.get('poster'),
+                                   fanart=i.get('backdrop'), year=i.get('year'), action=action,
+                                   url='{}/{}/{}-{}'.format(host, item.contentType, i.get('id'), support.scrapertools.slugify(i.get('title')))))
+    support.tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+    return itemlist
+
+
+def episodios(item):
+    support.info(item)
+    itemlist = []
+
+    for season in httptools.downloadpage(host + api_url + 'tvshow', post={'tvshow_id': item.id}).json.get('season', []):
+        season_id = season['season_number']
+        for episode in httptools.downloadpage(host + api_url + 'episodes', post={'tvshow_id': item.id, 'season_id': season_id}).json.get('episodes', []):
+            itemlist.append(item.clone(action="findvideos", contentSeason=season_id, contentEpisodeNumber=episode['episode_number'], id=item.id,
+                                       title=str(season_id) + 'x' + episode['episode_number'], contentType='episode'))
+    support.videolibrary(itemlist, item)
+    support.download(itemlist, item)
+
+    return itemlist
+
+
+def search(item, text):
+    support.info('search', item)
+    itemlist = []
+    for result in httptools.downloadpage(host + api_url + 'search', post={'search': text}).json.get('results', []):
+        contentType = 'movie' if result['type'] == 'FILM' else 'tvshow'
+        itemlist.append(item.clone(id=result.get('id'), title=result.get('title'), contentTitle=result.get('title'),
+                                   contentSerieName='' if contentType == 'movie' else result.get('title'),
+                                   contentPlot=result.get('description'), thumbnail=result.get('poster'),
+                                   fanart=result.get('backdrop'), year=result.get('year'), action='episodios' if contentType == 'tvshow' else 'findvideos',
+                                   url='{}/{}/{}-{}'.format(host, contentType, result.get('id'), support.scrapertools.slugify(result.get('title'))),
+                                   contentType=contentType))
+    support.tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+    return itemlist
+
+
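+# findvideos: a movie resolves through the 'movie' endpoint, an episode through
+# 'episode/links'; both return hoster links that are handed to support.server()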
+def findvideos(item):
+    itemlist = []
+    if not item.contentSeason:  # film
+        json = httptools.downloadpage(host + api_url + 'movie', post={'movie_id': item.id}).json
+    else:
+        json = httptools.downloadpage(host + api_url + 'episode/links', post={'tvshow_id': item.id, 'season_id': item.contentSeason, 'episode_id': item.contentEpisodeNumber}).json
+
+    for i in json.get('links', []) + json.get('special', []):
+        itemlist.append(Item(url=i.get('link')))
+    return support.server(item, itemlist=itemlist)
diff --git a/core/support.py b/core/support.py
index 2544c899..f15760a6 100755
--- a/core/support.py
+++ b/core/support.py
@@ -1145,7 +1145,10 @@ def nextPage(itemlist, item, data='', patron='', function_or_level=1, next_page=
     if next_page != "":
         if resub: next_page = re.sub(resub[0], resub[1], next_page)
         if 'http' not in next_page:
-            next_page = scrapertools.find_single_match(item.url, 'https?://[a-z0-9.-]+') + (next_page if next_page.startswith('/') else '/' + next_page)
+            if '/' in next_page:
+                next_page = scrapertools.find_single_match(item.url, 'https?://[a-z0-9.-]+') + (next_page if next_page.startswith('/') else '/' + next_page)
+            else:
+                next_page = '/'.join(item.url.split('/')[:-1]) + '/' + next_page
         next_page = next_page.replace('&amp;', '&')
         logger.debug('NEXT= ', next_page)
         itemlist.append(
@@ -1368,15 +1371,27 @@ def addQualityTag(item, itemlist, data, patron):
         info('nessun tag qualità trovato')
 
 def get_jwplayer_mediaurl(data, srvName, onlyHttp=False, dataIsBlock=False):
+    from core import jsontools
+
     video_urls = []
-    block = scrapertools.find_single_match(data, r'sources:\s*\[([^\]]+)\]') if not dataIsBlock else data
+    block = scrapertools.find_single_match(data, r'sources:\s*([^\]]+\])') if not dataIsBlock else data
     if block:
-        if 'file:' in block:
-            sources = scrapertools.find_multiple_matches(block, r'file:\s*"([^"]+)"(?:,label:\s*"([^"]+)")?')
-        elif 'src:' in block:
-            sources = scrapertools.find_multiple_matches(block, r'src:\s*"([^"]+)",\s*type:\s*"[^"]+"(?:,[^,]+,\s*label:\s*"([^"]+)")?')
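+        # first try to parse the sources block as JSON; fall back to the
+        # legacy regex scraping when the block is not valid JSON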
page_url.replace("/v/","/api/source/") + + page_url = page.url + page_url = page_url.replace("/f/", "/v/") + page_url = page_url.replace("/v/", "/api/source/") data = httptools.downloadpage(page_url, post={}).json logger.debug(data) if "Video not found or" in data or "We are encoding this video" in data: @@ -26,9 +29,9 @@ def get_video_url(page_url, user="", password="", video_password=""): logger.debug("(page_url='%s')" % page_url) video_urls = [] for file in data['data']: - media_url = file['file'] - label = file['label'] - extension = file['type'] - video_urls.append([ extension + ' ' + label + ' [Fembed]', media_url]) + media_url = file['file'] + label = file['label'] + extension = file['type'] + video_urls.append([ extension + ' ' + label + ' [Fembed]', media_url]) video_urls.sort(key=lambda x: int(x[0].split()[1].replace('p',''))) return video_urls diff --git a/servers/hxfile.json b/servers/hxfile.json new file mode 100644 index 00000000..4bba8484 --- /dev/null +++ b/servers/hxfile.json @@ -0,0 +1,41 @@ +{ + "active": true, + "find_videos": { + "ignore_urls": [], + "patterns": [ + { + "pattern": "https?://hxfile.co/(?!api)(?:embed-)?([A-z0-9]+)", + "url": "https://hxfile.co/embed-\\1.html" + } + ] + }, + "free": true, + "id": "hxfile", + "name": "HxFile", + "settings": [ + { + "default": false, + "enabled": true, + "id": "black_list", + "label": "@70708", + "type": "bool", + "visible": true + }, + { + "default": 0, + "enabled": true, + "id": "favorites_servers_list", + "label": "@60655", + "lvalues": [ + "No", + "1", + "2", + "3", + "4", + "5" + ], + "type": "list", + "visible": false + } + ] +} \ No newline at end of file diff --git a/servers/hxfile.py b/servers/hxfile.py new file mode 100644 index 00000000..487463c8 --- /dev/null +++ b/servers/hxfile.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- + +from core import httptools, scrapertools, servertools, support +from platformcode import logger, config +from lib import jsunpack + + +def test_video_exists(page_url): + logger.debug("(page_url='%s')" % page_url) + global data + data = httptools.downloadpage(page_url).data + if "Can't create video code" in data: + return False, config.get_localized_string(70292) % 'HxFile' + return True, "" + + +def get_video_url(page_url, premium=False, user="", password="", video_password=""): + logger.debug("url=" + page_url) + global data + video_urls = [] + packed = scrapertools.find_single_match(data, r'(eval\s?\(function\(p,a,c,k,e,d\).*?\n)') + data = jsunpack.unpack(packed) + video_urls.extend(support.get_jwplayer_mediaurl(data, 'HxFile')) + + return video_urls diff --git a/servers/playtube.json b/servers/playtube.json new file mode 100644 index 00000000..f7eaf1f7 --- /dev/null +++ b/servers/playtube.json @@ -0,0 +1,43 @@ +{ + "active": true, + "find_videos": { + "ignore_urls": [], + "patterns": [ + { + "pattern": "playtube.ws/(?:embed-|)(\\w+)", + "url": "https://playtube.ws/embed-\\1.html" + } + + ] + }, + "free": true, + "id": "playtube", + "name": "playtube", + "settings": [ + { + "default": false, + "enabled": true, + "id": "black_list", + "label": "@60654", + "type": "bool", + "visible": true + }, + { + "default": 0, + "enabled": true, + "id": "favorites_servers_list", + "label": "@60655", + "lvalues": [ + "No", + "1", + "2", + "3", + "4", + "5" + ], + "type": "list", + "visible": false + } + ], + "thumbnail": "https://i.postimg.cc/8CVV6DnF/playtube.png" +} \ No newline at end of file diff --git a/servers/playtube.py b/servers/playtube.py new file mode 100644 index 00000000..6f7c64d6 --- 
+    packed = scrapertools.find_single_match(data, r'(eval\s?\(function\(p,a,c,k,e,d\).*?\n)')
+    data = jsunpack.unpack(packed)
+    video_urls.extend(support.get_jwplayer_mediaurl(data, 'HxFile'))
+
+    return video_urls
diff --git a/servers/playtube.json b/servers/playtube.json
new file mode 100644
index 00000000..f7eaf1f7
--- /dev/null
+++ b/servers/playtube.json
@@ -0,0 +1,43 @@
+{
+    "active": true,
+    "find_videos": {
+        "ignore_urls": [],
+        "patterns": [
+            {
+                "pattern": "playtube.ws/(?:embed-|)(\\w+)",
+                "url": "https://playtube.ws/embed-\\1.html"
+            }
+
+        ]
+    },
+    "free": true,
+    "id": "playtube",
+    "name": "playtube",
+    "settings": [
+        {
+            "default": false,
+            "enabled": true,
+            "id": "black_list",
+            "label": "@60654",
+            "type": "bool",
+            "visible": true
+        },
+        {
+            "default": 0,
+            "enabled": true,
+            "id": "favorites_servers_list",
+            "label": "@60655",
+            "lvalues": [
+                "No",
+                "1",
+                "2",
+                "3",
+                "4",
+                "5"
+            ],
+            "type": "list",
+            "visible": false
+        }
+    ],
+    "thumbnail": "https://i.postimg.cc/8CVV6DnF/playtube.png"
+}
\ No newline at end of file
diff --git a/servers/playtube.py b/servers/playtube.py
new file mode 100644
index 00000000..6f7c64d6
--- /dev/null
+++ b/servers/playtube.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+# --------------------------------------------------------
+# playtube connector By Alfa development Group
+# --------------------------------------------------------
+import re
+import codecs
+from core import httptools
+from core import scrapertools
+from lib import jsunpack
+from platformcode import logger
+
+
+def test_video_exists(page_url):
+    logger.info("(page_url='%s')" % page_url)
+    global data
+    data = httptools.downloadpage(page_url)
+    if data.code == 404 or "File is no longer available" in data.data:
+        return False, "[playtube] The file does not exist or has been deleted"
+    return True, ""
+
+
+def get_video_url(page_url, premium=False, user="", password="", video_password=""):
+    logger.info("url=" + page_url)
+    video_urls = []
+    pack = scrapertools.find_single_match(data.data, 'p,a,c,k,e,d.*?</script>')
+    unpacked = jsunpack.unpack(pack)
+    url = scrapertools.find_single_match(unpacked, 'file:"([^"]+)') + "|referer=%s" % page_url
+    video_urls.append(['m3u8 [playtube]', url])
+    return video_urls
\ No newline at end of file
diff --git a/servers/vidmoly.json b/servers/vidmoly.json
index e8d4d984..aba842f8 100644
--- a/servers/vidmoly.json
+++ b/servers/vidmoly.json
@@ -4,7 +4,7 @@
         "ignore_urls": [],
         "patterns": [
             {
-                "pattern": "https?://vidmoly.net/(?:embed-)?(\\w+)\\.html",
+                "pattern": "https?://vidmoly.(?:net|to)/(?:embed-)?(\\w+)\\.html",
                 "url": "https://vidmoly.net/embed-\\1.html"
             },
             {
diff --git a/servers/youdbox.json b/servers/youdbox.json
index aff46ba7..669d6b26 100644
--- a/servers/youdbox.json
+++ b/servers/youdbox.json
@@ -4,8 +4,8 @@
         "ignore_urls": [],
         "patterns": [
             {
-                "pattern": "(https://youdbox.com/embed-[A-z0-9-]+.html)",
-                "url": "\\1"
+                "pattern": "https://youdbox.(?:com|net)/embed-([A-z0-9-]+.html)",
+                "url": "https://youdbox.net/embed-\\1"
             }
         ]
    },
diff --git a/servers/youdbox.py b/servers/youdbox.py
index a371e7d3..5cb09722 100644
--- a/servers/youdbox.py
+++ b/servers/youdbox.py
@@ -1,14 +1,21 @@
 # -*- coding: utf-8 -*-
+#
 
 import re
 from core import httptools
 from core import scrapertools
 from platformcode import logger
+import codecs
 
 
 def get_video_url(page_url, video_password):
-    logger.debug("(page_url='%s')" % page_url)
+    logger.info("(page_url='%s')" % page_url)
     video_urls = []
     data = httptools.downloadpage(page_url).data
-    url = scrapertools.find_single_match(data, '