Some fixes to greko's pull request
@@ -96,7 +96,7 @@ def episodios(item):
logger.info("blocks log: %s" % (blocks))
for block in blocks:
    season_n, episode_n = scrapertoolsV2.find_single_match(block, r'(\d+)(?:×|×)(\d+)')
    titolo = scrapertoolsV2.find_single_match(block, r'[&#;]\d+[ ](?:([a-zA-Z0-9;&#\s]+))[ ]?(?:[^<>])')
    titolo = scrapertoolsV2.find_single_match(block, r'[&#;]\d+[ ]([a-zA-Z0-9;&#.\s]+)[ ]?[^<>]')
    logger.info("block log: %s" % (block))

    titolo = re.sub(r'×|×', "x", titolo).replace("’", "'")
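For reference, the pattern used above to split the season and episode numbers simply captures the digits on either side of the separator, which the regex accepts either as a plain "×" or as its mojibake form "×". A minimal standalone sketch with the standard re module (the channel itself goes through scrapertoolsV2.find_single_match; the sample block is made up):

import re

# Hypothetical episode label as it might appear in a scraped block.
block = "2×05 Titolo episodio"

# Same separator alternation as above: digits, plain or mojibake "×", digits.
match = re.search(r'(\d+)(?:×|×)(\d+)', block)
if match:
    season_n, episode_n = match.groups()
    print(season_n, episode_n)  # -> 2 05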
@@ -166,32 +166,4 @@ def findvideos(item):
itemlist = support.server(item, item.url)


"""
This part works if the changes to support don't work out
"""
## support.log()
## itemlist =[]
## data= ''
## logger.info("Url item.url: [%s] " % item.url)
##
## urls = scrapertoolsV2.find_multiple_matches(item.url, r'href="([^"]+)"')
## itemlist = servertools.find_video_items(data=str(urls))
##
## for videoitem in itemlist:
##     videoitem.title = item.title + ' - [COLOR limegreen][[/COLOR]'+ videoitem.title+ ' [COLOR limegreen]][/COLOR]'
##     videoitem.fulltitle = item.fulltitle
##     videoitem.thumbnail = item.thumbnail
##     videoitem.show = item.show
##     videoitem.plot = item.plot
##     videoitem.channel = item.channel
##     videoitem.contentType = item.contentType
##
## # Check whether the links are valid
## if __comprueba_enlaces__:
##     itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
##
## # required by AutoPlay
## autoplay.start(itemlist, item)

return itemlist
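The commented block above is the previous manual flow, kept only as a fallback in case the support.server changes do not pan out. For orientation, a hedged sketch of that fallback as it would run (the function name is hypothetical, and the module-level imports of scrapertoolsV2, servertools and autoplay are assumed to exist elsewhere in the channel, as implied by the comments):

def findvideos_manual(item):
    # Pull every href out of the block of urls stored in item.url (assumed format),
    # then let servertools identify the known video hosts.
    urls = scrapertoolsV2.find_multiple_matches(item.url, r'href="([^"]+)"')
    itemlist = servertools.find_video_items(data=str(urls))
    for videoitem in itemlist:
        # Decorate each detected link with the metadata of the parent item.
        videoitem.title = item.title + ' - [COLOR limegreen][[/COLOR]' + videoitem.title + ' [COLOR limegreen]][/COLOR]'
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType
    # required by AutoPlay
    autoplay.start(itemlist, item)
    return itemlist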
@@ -152,7 +152,7 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
title = scrapertoolsV2.decodeHtmlentities(scraped["title"]).strip()
plot = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["plot"]))

if (scraped["quality"] and scraped["episode"]): # episode added by greko
if scraped["quality"] and scraped["episode"]: # episode added by greko
    longtitle = '[B]' + title + '[/B] - [B]' + scraped["episode"] + '[/B][COLOR blue][' + scraped["quality"] + '][/COLOR]' # episode added by greko
elif scraped["episode"]: # episode added by greko
    longtitle = '[B]' + title + '[/B] - [B]' + scraped["episode"] + '[/B]' # episode added by greko
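Only the title assembly changes here: quality, when present alongside the episode, is appended as a blue tag. A standalone sketch of the same composition in plain Python (the final fallback branch, with neither quality nor episode, is assumed rather than shown in the hunk):

def build_longtitle(title, episode='', quality=''):
    # Mirrors the branches above: bold title, optional episode, optional blue quality tag.
    if quality and episode:
        return '[B]' + title + '[/B] - [B]' + episode + '[/B][COLOR blue][' + quality + '][/COLOR]'
    elif episode:
        return '[B]' + title + '[/B] - [B]' + episode + '[/B]'
    return '[B]' + title + '[/B]'  # assumed fallback

print(build_longtitle("Serie", "2x05", "HD"))
# -> [B]Serie[/B] - [B]2x05[/B][COLOR blue][HD][/COLOR]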
@@ -450,16 +450,17 @@ def videolibrary(itemlist, item, typography=''):
contentType = 'tvshow'

title = typo(config.get_localized_string(30161) + ' ' + typography)
if config.get_videolibrary_support() and len(itemlist) > 0:
    itemlist.append(
        Item(channel=item.channel,
             title=title,
             contentType=contentType,
             contentSerieName=item.fulltitle if contentType == 'tvshow' else '',
             url=item.url,
             action=action,
             extra=extra,
             contentTitle=item.fulltitle))
if inspect.stack()[1][3] == 'findvideos' and contentType == 'movie' or inspect.stack()[1][3] != 'findvideos' and contentType != 'movie':
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title=title,
                 contentType=contentType,
                 contentSerieName=item.fulltitle if contentType == 'tvshow' else '',
                 url=item.url,
                 action=action,
                 extra=extra,
                 contentTitle=item.fulltitle))


def nextPage(itemlist, item, data, patron, function_level=1):
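The new guard reads inspect.stack()[1][3], i.e. the name of the function that called videolibrary, and appends the "add to library" entry only when findvideos asks for a movie or any other caller asks for a tvshow (note that Python parses "A and B or C and D" as "(A and B) or (C and D)"). A minimal sketch of that caller-name lookup, with illustrative function names not taken from the channel:

import inspect

def caller_name():
    # inspect.stack()[1] is the frame one level up; index 3 is its function name.
    return inspect.stack()[1][3]

def findvideos():
    return caller_name()

def episodios():
    return caller_name()

print(findvideos())  # -> findvideos
print(episodios())   # -> episodios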
@@ -493,12 +494,7 @@ def server(item, data='', headers='', AutoPlay=True, CheckLinks=True):
if not data:
    data = httptools.downloadpage(item.url, headers=headers).data
## fix by greko
# if we are passed a block of urls to search the videos in
if type(data) == list:
    data = str(item.url)
else:
    # if we are passed a single url to search the video in
    data = item.url
    data = str(item.url)
## END fix by greko

itemlist = servertools.find_video_items(data=data)
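The fix above only normalizes what gets handed to servertools.find_video_items: whether the channel passes a single url or a whole list of urls, it is reduced to a text blob that the server regexes can scan. A quick standalone illustration (the sample urls and the scanning regex are made up; the real matching is done by each server's own patterns):

import re

single = "https://example.com/embed-abc123.html"
block = ["https://example.com/embed-abc123.html", "https://host.example/v/xyz789"]

# Both cases become a plain string before scanning, so str() on a list is enough.
for data in (str(single), str(block)):
    print(re.findall(r"https?://[^\s',\]]+", data))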
@@ -51,10 +51,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
# fix by greko start
if not data:
    data = scrapertoolsV2.find_single_match(idata, 'action="(?:[^/]+.*?/[^/]+/([a-zA-Z0-9_]+))">')
if '/olink/' in url or '/delta/' in url or '/mango/' in url or '/now/' in url:
    from lib import unshortenit
    data, status = unshortenit.unshorten(url)
    logger.info("Data - Status zcrypt linkup : [%s] [%s] " % (data, status))
from lib import unshortenit
data, status = unshortenit.unshorten(url)
logger.info("Data - Status zcrypt linkup : [%s] [%s] " % (data, status))
# fix by greko end
else:
    data = ""
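The duplicated unshorten lines are the before/after of the fix: shortened zcrypt/linkup urls (olink, delta, mango, now) are now resolved through the bundled unshortenit helper before the resolver continues. A hedged sketch of that step in isolation, assuming the addon environment (lib.unshortenit and platformcode.logger as imported elsewhere in this file); the sample url is made up:

from lib import unshortenit
from platformcode import logger

url = "https://linkup.example/delta/abc123"  # hypothetical shortened link
if '/olink/' in url or '/delta/' in url or '/mango/' in url or '/now/' in url:
    # unshorten() returns the resolved target url and a status code,
    # which are then logged exactly as in the hunk above.
    data, status = unshortenit.unshorten(url)
    logger.info("Data - Status zcrypt linkup : [%s] [%s] " % (data, status))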
@@ -6,6 +6,10 @@
            {
                "pattern": "vidto.me/(?:embed-|)([A-z0-9]+)",
                "url": "http://vidto.me/embed-\\1.html"
            },
            {
                "pattern": "vidtome.stream/(?:embed-|)([A-z0-9]+)",
                "url": "http://vidtome.stream/\\1.html"
            }
        ]
    },
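Each entry added above pairs a detection regex with a url template, where \1 is the captured video id. A quick standalone check of how the two patterns resolve a link (the sample href is made up; in the addon this matching is done by servertools from the server JSON):

import re

patterns = [
    (r"vidto.me/(?:embed-|)([A-z0-9]+)", r"http://vidto.me/embed-\1.html"),
    (r"vidtome.stream/(?:embed-|)([A-z0-9]+)", r"http://vidtome.stream/\1.html"),
]

sample = 'href="http://vidtome.stream/embed-a1B2c3.html"'
for pattern, template in patterns:
    match = re.search(pattern, sample)
    if match:
        # Substitute the captured id into the template, as the resolver would.
        print(re.sub(pattern, template, match.group(0)))
# -> http://vidtome.stream/a1B2c3.html

With vidtome.stream links now recognized here, the separate vidtomestream server (the JSON and the connector deleted below) is presumably redundant.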
@@ -1,41 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "vidtome.stream/(?:embed-|)([A-z0-9]+)",
                "url": "http://vidtome.stream/\\1.html"
            }
        ]
    },
    "free": true,
    "id": "vidtomestream",
    "name": "vidtomestream",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ]
}
@@ -1,33 +0,0 @@
# -*- coding: utf-8 -*-
# -*- copied and adapted from vidtome -*-
# -*- by Greko -*-

from core import httptools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "Not Found" in data or "File Does not Exist" in data:
        return False, "[vidtomestream] The video does not exist or has been deleted"
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)
    data = httptools.downloadpage(page_url).data
    code = scrapertools.find_single_match(data, 'name="code" value="([^"]+)')
    hash = scrapertools.find_single_match(data, 'name="hash" value="([^"]+)')
    post = "op=download1&code=%s&hash=%s&imhuman=Proceed+to+video" % (code, hash)
    data1 = httptools.downloadpage("http://m.vidtome.stream/playvideo/%s" % code, post=post).data
    video_urls = []
    media_urls = scrapertools.find_multiple_matches(data1, 'file: "([^"]+)')
    for media_url in media_urls:
        ext = scrapertools.get_filename_from_url(media_url)[-4:]
        video_urls.append(["%s [vidtomestream]" % (ext), media_url])
    video_urls.reverse()
    for video_url in video_urls:
        logger.info("%s" % (video_url[0]))
    return video_urls