Fix dailymotion (ported from alfa), minor changes

This commit is contained in:
marco
2020-06-09 20:15:09 +02:00
parent bdb6422218
commit f1a5fbb818
4 changed files with 24 additions and 29 deletions

View File

@@ -6,8 +6,7 @@ import inspect
import os
import re
import sys
from lib.guessit import guessit
from time import time
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
@@ -283,6 +282,7 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
longtitle = title + (s if title and title2 else '') + title2 + '\n'
if sceneTitle:
from lib.guessit import guessit
try:
parsedTitle = guessit(title)
title = longtitle = parsedTitle.get('title', '')
@@ -437,7 +437,7 @@ def scrape(func):
data = re.sub('\n|\t', ' ', data)
data = re.sub(r'>\s+<', '> <', data)
# replace all ' with " and eliminate newlines/tabs, so the scraping patterns don't need to worry about quoting or line breaks
scrapingTime = time()
if patronBlock:
if debugBlock:
regexDbg(item, patronBlock, headers, data)
@@ -479,7 +479,6 @@ def scrape(func):
else:
break
if (pagination and len(matches) <= pag * pagination) or not pagination: # next page with pagination
if patronNext and inspect.stack()[1][3] != 'newest':
nextPage(itemlist, item, data, patronNext, function)
@@ -502,8 +501,8 @@ def scrape(func):
if action != 'play' and function != 'episodios' and 'patronMenu' not in args and item.contentType in ['movie', 'tvshow', 'episode']:
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
from specials import autorenumber
if anime:
from specials import autorenumber
if function == 'episodios' or item.action == 'episodios': autorenumber.renumber(itemlist, item, 'bold')
else: autorenumber.renumber(itemlist)
# if anime and autorenumber.check(item) == False and len(itemlist)>0 and not scrapertools.find_single_match(itemlist[0].title, r'(\d+.\d+)'):
@@ -514,7 +513,6 @@ def scrape(func):
videolibrary(itemlist, item, function=function)
if function == 'episodios' or function == 'findvideos':
download(itemlist, item, function=function)
if 'patronMenu' in args and itemlist:
itemlist = thumb(itemlist, genre=True)
@@ -526,7 +524,7 @@ def scrape(func):
if config.get_setting('trakt_sync'):
from core import trakt_tools
trakt_tools.trakt_check(itemlist)
log('scraping time: ' , time()-scrapingTime)
return itemlist
return wrapper

View File

@@ -7,8 +7,10 @@ from platformcode import logger, config
# NOTE(review): this span is a rendered diff hunk — removed and added lines are
# interleaved without +/- markers and the original indentation was stripped, so
# it is not valid Python exactly as displayed. Comments below flag what needs
# confirming against the committed file.
def test_video_exists(page_url):
# Log the URL being probed (project logger, '%'-style formatting).
logger.info("(page_url='%s')" % page_url)
# First fetch of the page via the project's httptools wrapper.
response = httptools.downloadpage(page_url)
# NOTE(review): the Spanish "Contenido rechazado" branch looks like the
# pre-commit (removed) side of the diff — confirm which side survived.
if "Contenido rechazado" in response.data:
# NOTE(review): `global response` AFTER the local assignment above would raise
# "SyntaxError: name 'response' is assigned to before global declaration".
# Presumably the global is meant to let get_video_url() reuse this page —
# verify the committed line order, or drop the global entirely.
global response
# Retry the download without sending cookies.
response = httptools.downloadpage(page_url, cookies=False)
# Italian "page not found" marker → report the video as unavailable
# (localized string 70449 parameterized with the server name).
if "Pagina non trovata" in response.data:
return False, config.get_localized_string(70449) % "dailymotion"
# HTTP 404 → same unavailable result.
if response.code == 404:
return False, config.get_localized_string(70449) % "dailymotion"
@@ -18,26 +20,25 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Return the playable stream URLs for a Dailymotion page.

    Each element of the returned list is ``[label, url, 0, subtitle_url]``,
    the 4-item entry format the player expects. ``premium``, ``user``,
    ``password`` and ``video_password`` are part of the common server-module
    signature and are unused for Dailymotion.

    NOTE(review): the reviewed span was a rendered diff with pre- and
    post-commit lines interleaved (duplicate ``subtitle``/``qualities``/
    ``patron`` assignments and a duplicated ``return``); this body keeps the
    post-commit behavior only, with the regexes as raw strings so Python 3
    does not warn about invalid escape sequences.
    """
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    response = httptools.downloadpage(page_url, cookies=False)
    # Dailymotion wants its freshly-set session cookie echoed back when we
    # resolve the mp4 CDN redirect below.
    cookie = {'Cookie': response.headers["set-cookie"]}
    # The embedded player config escapes slashes; strip the backslashes once
    # so the patterns below can match plain JSON-ish text.
    data = response.data.replace("\\", "")
    subtitle = scrapertools.find_single_match(data, r'"subtitles":.*?"es":.*?urls":\["([^"]+)"')
    qualities = scrapertools.find_multiple_matches(data, r'"([^"]+)":(\[\{"type":".*?\}\])')
    # Loop-invariant pattern: hoisted out of the loop (the original rebuilt it
    # on every iteration). '\/' in the source regex matches the same '/' —
    # backslashes were already stripped from `data` above.
    patron = r'"type":"(?:video|application)/([^"]+)","url":"([^"]+)"'
    for calidad, urls in qualities:
        matches = scrapertools.find_multiple_matches(urls, patron)
        for stream_type, stream_url in matches:
            stream_type = stream_type.replace('x-mpegURL', 'm3u8')
            if stream_type == "mp4":
                # Resolve the CDN redirect without downloading the video body;
                # keep the original URL if no Location header comes back.
                stream_url = httptools.downloadpage(stream_url, headers=cookie, only_headers=True,
                                                    follow_redirects=False).headers.get("location", stream_url)
            else:
                # m3u8 master playlist: read the real quality label and, when a
                # progressive (plain HTTP) variant is advertised, prefer it.
                data_m3u8 = httptools.downloadpage(stream_url).data
                calidad = scrapertools.find_single_match(data_m3u8, r'NAME="([^"]+)"')
                stream_url_http = scrapertools.find_single_match(data_m3u8, r'PROGRESSIVE-URI="([^"]+)"')
                if stream_url_http:
                    stream_url = stream_url_http
            video_urls.append(["%sp .%s [dailymotion]" % (calidad, stream_type), stream_url, 0, subtitle])
    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))
    return video_urls

View File

@@ -31,12 +31,10 @@ try:
import xbmcgui
except:
xbmcgui = None
import xbmc
import re, base64, json, os, inspect
import re, base64, json, inspect
from core import jsontools, tvdb, scrapertools, filetools
from core.support import typo, log, dbg
from platformcode import config, platformtools, logger
from platformcode.config import get_setting
from core.support import typo, log
from platformcode import config, platformtools
TAG_TVSHOW_RENUMERATE = "TVSHOW_AUTORENUMBER"
TAG_ID = "ID"

View File

@@ -2,7 +2,7 @@
# -*- Channel Community -*-
import re, os, inspect, xbmcaddon, xbmcgui
import re, inspect, xbmcgui
from core import httptools, jsontools, tmdb, support, filetools
from core.item import Item
@@ -33,7 +33,6 @@ def mainlist(item):
with open(path, "w") as file:
file.write('{"channels":{}}')
file.close()
autoplay.init(item.channel, list_servers, list_quality)
return show_channels(item)
@@ -74,7 +73,6 @@ def show_channels(item):
context=context,
path=path))
autoplay.show_option(item.channel, itemlist)
support.channel_config(item, itemlist)
return itemlist