Here are the examples of the python api core.scrapertools.find_single_match.replace taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
6 Examples
3
View Complete Implementation : thumbzilla.py
Copyright GNU General Public License v3.0
Author : alfa-addon
def catalogo(item):
    """Build the catalogue listing for the channel.

    Downloads the page at ``item.url``, scrapes every entry link with its
    thumbnail and title, and appends a "next page" item when pagination
    markup is present.

    :param item: channel Item whose ``url`` points at the catalogue page.
    :return: list of Item objects (entries plus optional pagination item).
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Flatten the markup so the non-DOTALL-sensitive parts of the pattern
    # match across what were originally separate lines.
    # NOTE(review): the bare space alternative may originally have been
    # "&nbsp;" before the page was scraped — confirm against upstream.
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    # NOTE(review): the scraped original read 'clast="sickstars"'; "clast"
    # is an "ss"->"st" scrape corruption of the HTML attribute "class",
    # restored here. The class value itself is kept as scraped.
    patron = '<li class="sickstars">.*?<a href="([^"]+)".*?'
    patron += '<img src="([^"]+)" alt="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        # Resolve relative links against the page URL.
        url = urlparse.urljoin(item.url, scrapedurl)
        itemlist.append(Item(channel=item.channel, action="videos", url=url, title=scrapedtitle,
                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail,
                             viewmode="movie_with_plot"))
    # "amp;" is stripped so the href's HTML-escaped "&amp;" becomes "&".
    paginacion = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />').replace('amp;', '')
    if paginacion:
        # "\xc2\xbb" is UTF-8 for the right guillemet (»).
        itemlist.append(Item(channel=item.channel, action="catalogo",
                             thumbnail=thumbnail % 'rarrow',
                             title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))
    return itemlist
3
View Complete Implementation : thumbzilla.py
Copyright GNU General Public License v3.0
Author : alfa-addon
def play(item):
    """Resolve the direct video URL for playback.

    Extracts the ``videoUrl`` field from the page's embedded JSON and strips
    the backslash escaping JSON applies to the slashes.

    :param item: Item whose ``url`` is the video page.
    :return: single-element list with a playable clone of ``item``.
    """
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # JSON-escaped URL ("http:\/\/...") -> plain URL.
    url = scrapertools.find_single_match(data, '"quality":"[^"]+","videoUrl":"([^"]+)"').replace('\\', '')
    # NOTE(review): the scraped original read "satle=item.contentTile";
    # both are scrape corruptions of "title"/"contentTitle", restored here.
    itemlist.append(item.clone(url=url, title=item.contentTitle))
    return itemlist
0
View Complete Implementation : favorites.py
Copyright GNU General Public License v3.0
Author : alfa-addon
def mainlist(item):
    """List the user's saved favourites as channel items.

    Reads every stored favourite, reconstructs the original Item from the
    serialized plugin URL, and attaches "remove" and "rename" context-menu
    entries to each one.

    :param item: entry Item (unused beyond dispatch).
    :return: list of reconstructed favourite Items.
    """
    logger.info()
    itemlist = []
    for name, thumb, data in read_favourites():
        # Only entries created by this plugin can be deserialized.
        if "plugin://plugin.video.%s/?" % config.PLUGIN_NAME in data:
            # BUGFIX: the scraped original had .replace(""", "") — the HTML
            # entity "&quot;" was collapsed by the scraper into a bare quote,
            # leaving invalid Python. Restored to strip "&quot;" artifacts
            # from the stored URL.
            url = scrapertools.find_single_match(data, 'plugin://plugin.video.%s/\?([^;]*)' % config.PLUGIN_NAME) \
                              .replace("&quot;", "")
            item = Item().fromurl(url)
            item.title = name
            item.thumbnail = thumb
            item.isFavourite = True
            # Normalize item.context to a list of dicts before extending.
            if isinstance(item.context, str):
                item.context = item.context.split("|")
            elif not isinstance(item.context, list):
                item.context = []
            item.context.extend([{"title": config.get_localized_string(30154),  # "Quitar de favoritos"
                                  "action": "delFavourite",
                                  "channel": "favorites",
                                  "from_title": item.title},
                                 {"title": "Renombrar",
                                  "action": "renameFavourite",
                                  "channel": "favorites",
                                  "from_title": item.title}
                                 ])
            # logger.debug(item.tostring('\n'))
            itemlist.append(item)
    return itemlist
0
View Complete Implementation : vimpleru.py
Copyright GNU General Public License v3.0
Author : alfa-addon
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the direct media URL for a vimple.ru page.

    Scrapes the media URL from the player JSON and appends the User-Agent
    and Cloudflare/session cookies the host requires as HTTP-header hints.

    NOTE(review): the scraped original named the parameters "pastword" /
    "video_pastword" — an "ss"->"st" scrape corruption of the standard
    server-connector signature (password / video_password); restored here.

    :param page_url: URL of the video page.
    :param premium: unused; kept for the connector interface.
    :param user: unused; kept for the connector interface.
    :param password: unused; kept for the connector interface.
    :param video_password: unused; kept for the connector interface.
    :return: list of [label, url] pairs.
    """
    logger.info("(page_url=%s)" % page_url)
    data = httptools.downloadpage(page_url).data
    # JSON-escaped URL -> plain URL.
    media_url = scrapertools.find_single_match(data, '"video"[^,]+,"url":"([^"]+)"').replace('\\', '')
    # Cookies are stored tab-separated; rewrite "name\tvalue" as "name=value".
    data_cookie = config.get_cookie_data()
    cfduid = scrapertools.find_single_match(data_cookie, r'.vimple.ru.*?(__cfduid\t[a-f0-9]+)') \
                         .replace('\t', '=')
    univid = scrapertools.find_single_match(data_cookie, r'.vimple.ru.*?(UniversalUserID\t[a-f0-9]+)') \
                         .replace('\t', '=')
    # Kodi-style header suffix: "|Header=value&Cookie=...".
    media_url += "|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0" \
                 "&Cookie=%s; %s" % (cfduid, univid)
    video_urls = []
    video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [vimple.ru]", media_url])
    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))
    return video_urls
0
View Complete Implementation : descargasmix.py
Copyright GNU General Public License v3.0
Author : pelisalacarta-ce
def play(item):
    """Return the playable item, resolving indirect links through the
    site's AJAX endpoint when ``item.url`` is not already a direct
    http/magnet link.

    :param item: Item to play; ``extra`` carries the Referer for
        intermediate "enlacesmix" pages.
    :return: list with one resolved clone, the original clone for direct
        links, or an empty list when resolution fails.
    """
    logger.info()
    itemlist = []
    if item.url.startswith(("http", "magnet")):
        # Already a direct link: hand it back untouched.
        itemlist.append(item.clone())
        return itemlist
    # Ask the site's admin-ajax endpoint for the real URL.
    payload = "source=%s&action=obtenerurl" % urllib.quote(item.url)
    ajax_headers = {'X-Requested-With': 'XMLHttpRequest'}
    endpoint = "%s/wp-admin/admin-ajax.php" % host.replace("https", "http")
    response = httptools.downloadpage(endpoint, post=payload, headers=ajax_headers,
                                      follow_redirects=False).data
    url = scrapertools.find_single_match(response, 'url":"([^"]+)"').replace("\\", "")
    if "enlacesmix" in url:
        # One more hop: the intermediate page embeds the target in an iframe.
        response = httptools.downloadpage(url, headers={'Referer': item.extra},
                                          follow_redirects=False).data
        url = scrapertools.find_single_match(response, '<iframe.*?src="([^"]+)"')
    enlaces = servertools.findvideosbyserver(url, item.server)
    if enlaces:
        itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))
    return itemlist
0
View Complete Implementation : favoritos.py
Copyright GNU General Public License v3.0
Author : pelisalacarta-ce
def mainlist(item):
    """List the user's saved favourites as channel items.

    Reads every stored favourite, reconstructs the original Item from the
    serialized plugin URL, and attaches "remove" and "rename" context-menu
    entries to each one.

    :param item: entry Item (unused beyond dispatch).
    :return: list of reconstructed favourite Items.
    """
    logger.info()
    itemlist = []
    for name, thumb, data in read_favourites():
        # Only entries created by this plugin can be deserialized.
        if "plugin://plugin.video.%s/?" % config.PLUGIN_NAME in data:
            # BUGFIX: the scraped original had .replace(""", "") — the HTML
            # entity "&quot;" was collapsed by the scraper into a bare quote,
            # leaving invalid Python. Restored to strip "&quot;" artifacts
            # from the stored URL.
            url = scrapertools.find_single_match(data, 'plugin://plugin.video.%s/\?([^;]*)' % config.PLUGIN_NAME)\
                              .replace("&quot;", "")
            item = Item().fromurl(url)
            item.title = name
            item.thumbnail = thumb
            item.isFavourite = True
            # Normalize item.context to a list of dicts before extending.
            if isinstance(item.context, str):
                item.context = item.context.split("|")
            elif not isinstance(item.context, list):
                item.context = []
            item.context.extend([{"title": config.get_localized_string(30154),  # "Quitar de favoritos"
                                  "action": "delFavourite",
                                  "channel": "favoritos",
                                  "from_title": item.title},
                                 {"title": "Renombrar",
                                  "action": "renameFavourite",
                                  "channel": "favoritos",
                                  "from_title": item.title}
                                 ])
            # logger.debug(item.tostring('\n'))
            itemlist.append(item)
    return itemlist