- patron = r'<a href="([^"]+)" rel="bookmark">(.*?)<\/a>'
- patron += r'.*?<img[^>]+src="([^"]+)"[^>]+>.*?<p>(.*?)<\/p>'
+ patron = r'<a href="([^"]+)" rel="bookmark">([^<]+)<\/a>.*?<img[^>]+src="([^"]+)"[^>]+>.*?'
+ patron += r'<p[^>]+> (.*?)<\/p>'
+ matches = re.compile(patron, re.DOTALL).findall(data)
+
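+ # client-side pagination: keep only the minpage-sized slice of matches belonging to page p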
+ for i, (scrapedurl, scrapedtitle, scrapedthumbnail, scrapedplot) in enumerate(matches):
+ if (p - 1) * minpage > i: continue
+ if i >= p * minpage: break
+ scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
+
+ itemlist.append(
+ Item(channel=channel,
+ action="episodios",
+ contentType="episode",
+ title=scrapedtitle,
+ fulltitle=scrapedtitle,
+ url=scrapedurl,
+ show=scrapedtitle,
+ thumbnail=scrapedthumbnail,
+ plot=scrapedplot,
+ folder=True))
+
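+ # a full slice suggests more results: add a "Successivo >" (next page) item, encoding p+1 after the '{}' marker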
+ if len(matches) >= p * minpage:
+ scrapedurl = item.url + '{}' + str(p + 1)
+ itemlist.append(
+ Item(channel=channel,
+ args=item.args,
+ action="insert",
+ title="[COLOR blue][B]Successivo >[/B][/COLOR]",
+ url=scrapedurl,
+ thumbnail="thumb_next.png",
+ folder=True))
+
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+ return itemlist
+
+#----------------------------------------------------------------------------------------------------------------------------------------------
+
+def updates(item):
+ logger.info("[toonitalia.py] updates")
+ itemlist = []
+
+ data = httptools.downloadpage(item.url, headers=headers).data
+
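+ # isolate the "Aggiornamenti" (updates) block of the page, then scrape its links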
+ blocco = r'Aggiornamenti<\/h\d>(.*?)<\/ul>'
+ matches = re.compile(blocco, re.DOTALL).findall(data)
+ for scrapedurl in matches:
+ blocco = scrapedurl
+
+ patron = r'<a href="([^"]+)"[^>]*>(.*?)<\/a>'
+ matches = re.compile(patron, re.DOTALL).findall(blocco)
+
+ for scrapedurl, scrapedtitle in matches:
+ scrapedplot = ""
+ scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
+ itemlist.append(
+ Item(channel=channel,
+ action="episodios",
+ contentType="episode",
+ title=scrapedtitle,
+ fulltitle=scrapedtitle,
+ url=scrapedurl,
+ show=scrapedtitle,
+ plot=scrapedplot))
+
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+ return itemlist
+
+#----------------------------------------------------------------------------------------------------------------------------------------------
+
+def most_view(item):
+ logger.info("[toonitalia.py] most_view")
+ itemlist = []
+
+ data = httptools.downloadpage(item.url, headers=headers).data
+
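+ # isolate the "I piu visti" (most viewed) block of the page, then scrape its links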
+ blocco = r'I piu visti<\/h\d>(.*?)<\/ul>'
+ matches = re.compile(blocco, re.DOTALL).findall(data)
+ for scrapedurl in matches:
+ blocco = scrapedurl
+
+ patron = r'<a href="([^"]+)"[^>]*>([^<]+)<\/a>'
+ matches = re.compile(patron, re.DOTALL).findall(blocco)
+
+ for scrapedurl, scrapedtitle in matches:
+ scrapedplot = ""
+ scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
+ itemlist.append(
+ Item(channel=channel,
+ action="episodios",
+ contentType="episode",
+ title=scrapedtitle,
+ fulltitle=scrapedtitle,
+ url=scrapedurl,
+ show=scrapedtitle,
+ plot=scrapedplot))
+
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+ return itemlist
+
+#----------------------------------------------------------------------------------------------------------------------------------------------
+
+def list(item):
+ logger.info("[toonitalia.py] list")
+ itemlist = []
+ minpage = 14
+
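+ # the page number travels appended to item.url after a '{}' marker (see the "Successivo >" item)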
+ p = 1
+ if '{}' in item.url:
+ item.url, p = item.url.split('{}')
+ p = int(p)
+
+ data = httptools.downloadpage(item.url, headers=headers).data
+
+ patron = r'<a href="([^"]+)"[^>]*>([^<]+)<\/a>'
+ matches = re.compile(patron, re.DOTALL).findall(data)
+
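+ # the episode list also contains Wikipedia links: skip them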
+ for scrapedurl, scrapedtitle in matches:
+ if 'Wikipedia' not in scrapedurl:
+ scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).replace("×", "x")
+ scrapedtitle = scrapedtitle.replace("_", " ")
+ scrapedtitle = scrapedtitle.replace(".mp4", "")
+ puntata = scrapertools.find_single_match(scrapedtitle, '[0-9]+x[0-9]+')
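+ # merge mirrors of the same episode (same NxM tag) into one item, urls space-separated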
+ for i in itemlist:
+ if i.args == puntata: # already added
+ i.url += " " + scrapedurl
+ break
+
+ else:
+ itemlist.append(
+ Item(channel=channel,
+ action="findvideos",
+ contentType=item.contentType,
+ title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
+ thumbnail=item.thumbnail,
+ fulltitle=scrapedtitle,
+ url=scrapedurl,
+ args=puntata,
+ show=item.show,
+ plot=item.plot))
+
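+ # let support add the "add to videolibrary" entry for this show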
+ support.videolibrary(itemlist, item, 'color kod')
+
+ return itemlist
+
+#----------------------------------------------------------------------------------------------------------------------------------------------
+
+def search(item, texto):
+ logger.info("[toonitalia.py] " + item.url + " search " + texto)
+ item.url = host + "/?s=" + texto
+ try:
+ return peliculas(item)
+
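+ # a failing channel must not break the global search: log the error and return an empty list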
+ except:
+ import sys
+ for line in sys.exc_info():
+ logger.error("%s" % line)
+ return []
+
+#----------------------------------------------------------------------------------------------------------------------------------------------
+
+def findvideos(item):
+ logger.info("[toonitalia.py] findvideos")
+
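+ # for films the page still has to be downloaded; for episodes item.url already holds the space-separated mirrors collected by list()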
+ if item.args == 'film':
+ data = httptools.downloadpage(item.url, headers=headers).data
+ itemlist = servertools.find_video_items(data=data)
+
+ for videoitem in itemlist:
+ videoitem.channel = channel
+ server = re.sub(r'[-\[\]\s]+', '', videoitem.title)
+ videoitem.title = "".join(['[COLOR blue] ' + "[[B]" + server + "[/B]][/COLOR] " + item.title])
+ videoitem.thumbnail = item.thumbnail
+ videoitem.plot = item.plot
+ videoitem.fulltitle = item.fulltitle
+ videoitem.show = item.show
+
+ else:
+ itemlist = servertools.find_video_items(data=item.url)
+
+ for videoitem in itemlist:
+ videoitem.channel = channel
+ server = re.sub(r'[-\[\]\s]+', '', videoitem.title)
+ videoitem.title = "".join(['[COLOR blue] ' + "[[B]" + server + "[/B]] " + item.title + '[/COLOR]'])
+ videoitem.thumbnail = item.thumbnail
+ videoitem.plot = item.plot
+ videoitem.fulltitle = item.fulltitle
+ videoitem.show = item.show
+
+ autoplay.start(itemlist, item)
+
+ return itemlist
diff --git a/resources/media/channels/thumb/documentaristreamingda.png b/resources/media/channels/thumb/documentaristreamingda.png
new file mode 100644
index 0000000000000000000000000000000000000000..c9342c90aff03d33087f65c879672972392fddc3