openscrapers.modules.source_utils.get_release_quality - python examples

Here are the examples of the python api openscrapers.modules.source_utils.get_release_quality taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

44 Examples

3 View Complete Implementation : kickass2.py
Copyright GNU General Public License v3.0
Author : a4k-openproject
	def _get_sources(self, item):
		"""Convert one parsed torrent item (name, url, size-info) into a source dict.

		item: sequence of [name, url, extra-info string]; appends to self._sources.
		Non-English releases are skipped. Errors are logged and swallowed (best-effort scraper).
		"""
		try:
			name = item[0]
			url = item[1]

			# Skip releases tagged with non-English language markers.
			if any(x in url.lower() for x in ['french', 'italian', 'spanish', 'truefrench', 'dublado', 'dubbed']):
				return

			quality, info = source_utils.get_release_quality(name, url)

			info.append(item[2])  # if item[2] != '0'
			info = ' | '.join(info)

			self._sources.append({'source': 'torrent', 'quality': quality, 'language': 'en', 'url': url,
												'info': info, 'direct': False, 'debridonly': True})

		except:
			# FIX: was 'past' (a NameError, masking the logging call's intent) and a
			# scrape-corrupted tag 'KICKast2'; log the error and continue best-effort.
			source_utils.scraper_error('KICKass2')

0 View Complete Implementation : more_sources.py
Copyright GNU General Public License v3.0
Author : a4k-openproject
def more_vidnode(link, hostDict):
	"""Scrape direct stream URLs (with quality labels) from a vidnode.net page."""
	sources = []  # By Shellc0de
	try:
		req_headers = {
			'Host': 'vidnode.net',
			'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
			'Upgrade-Insecure-Requests': '1',
			'Accept-Language': 'en-US,en;q=0.9'
		}
		html = client.request(link, headers=req_headers, timeout=5)
		# Each match is (file URL, quality label like "720 P").
		matches = re.findall('''\{file:\s*['"]([^'"]+).*?label:\s*['"](\d+\s*P)['"]''', html, re.DOTALL | re.I)
		for stream_url, label in matches:
			quality, info = source_utils.get_release_quality(label, stream_url)
			host = stream_url.split('//')[1].replace('www.', '')
			host = host.split('/')[0].lower()  # 'CDN'
			sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': stream_url,
			                'info': info, 'direct': True, 'debridonly': False})
		return sources
	except:
		return sources

0 View Complete Implementation : more_sources.py
Copyright GNU General Public License v3.0
Author : a4k-openproject
def more_vidlink(link, hostDict):
	"""Resolve stream sources from a vidlink.org embed link.

	Posts to the update_views endpoint, unpacks the returned JS, optionally
	extracts an openload mirror, then enumerates m3u8 variants by resolution.
	Returns a (possibly empty) list of source dicts; errors are swallowed.
	"""
	sources = []  # By Shellc0de
	try:
		ua = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'}
		postID = link.split('/embed/')[1]
		post_link = 'https://vidlink.org/embed/update_views'
		payload = {'postID': postID}
		headers = ua
		headers['X-Requested-With'] = 'XMLHttpRequest'
		headers['Referer'] = link
		ihtml = client.request(post_link, post=payload, headers=headers)
		linkcode = jsunpack.unpack(ihtml).replace('\\', '')
		try:
			extra_link = re.findall(r'var oploadID="(.+?)"', linkcode)[0]
			oload = 'https://openload.co/embed/' + extra_link
			sources.append(
				{'source': 'openload.co', 'quality': '1080p', 'language': 'en', 'url': oload, 'direct': False,
				 'debridonly': False})
		except Exception:
			pass  # FIX: was 'past' (NameError); openload mirror is optional, best-effort
		links = re.findall(r'var file1="(.+?)"', linkcode)[0]
		stream_link = links.split('/pl/')[0]
		headers = {'Referer': 'https://vidlink.org/',
		           'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'}
		response = client.request(links, headers=headers)
		# Playlist entries: (vertical resolution, relative variant path).
		urls = re.findall(r'[A-Z]{10}=\d+x(\d+)\W[A-Z]+=\"\w+\"\s+\/(.+?)\.', response)
		if urls:
			for qual, url in urls:
				url = stream_link + '/' + url + '.m3u8'
				quality, info = source_utils.get_release_quality(qual, url)
				sources.append(
					{'source': 'GVIDEO', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': True,
					 'debridonly': False})
		return sources
	except:
		return sources

0 View Complete Implementation : more_sources.py
Copyright GNU General Public License v3.0
Author : a4k-openproject
def more_gomo(link, hostDict):
	"""Resolve hoster sources for a gomostream.com link via its decoding endpoint.

	Scrapes the token pair from the page, computes the site's x-token header,
	then POSTs to decoding_v3.php which returns a JSON list of hoster URLs.
	Returns a (possibly empty) list of source dicts; errors are swallowed.
	"""
	sources = []  # By Mpie
	try:
		gomo_link = 'https://gomostream.com/decoding_v3.php'
		User_Agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
		result = client.request(link)
		tc = re.compile('tc = \'(.+?)\';').findall(result)[0]
		if tc:  # idiom: no redundant parentheses around the condition
			token = re.compile('"_token": "(.+?)",').findall(result)[0]
			post = {'tokenCode': tc, '_token': token}

			def tsd(tokenCode):
				# Site's token scrambling: reversed slice [4:18] plus a fixed suffix.
				_13x48X = tokenCode
				_71Wxx199 = _13x48X[4:18][::-1]
				return _71Wxx199 + "18" + "432782"

			headers = {'Host': 'gomostream.com', 'Referer': link, 'User-Agent': User_Agent, 'x-token': tsd(tc)}
			result = client.request(gomo_link, XHR=True, post=post, headers=headers)
			urls = json.loads(result)
			for url in urls:
				# Skip the site's own CDN entries; only external hosters are kept.
				if 'gomostream' in url:
					continue
				quality, info = source_utils.get_release_quality(url, url)
				valid, host = source_utils.is_host_valid(url, hostDict)
				sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info,
				                'direct': False, 'debridonly': False})
		return sources
	except:
		return sources

0 View Complete Implementation : filmpalast.py
Copyright GNU General Public License v3.0
Author : a4k-openproject
	def sources(self, url, hostDict, hostprDict):
		"""Scrape German hoster links from a filmpalast release page.

		Returns a list of source dicts; multi-part streams are flagged in 'info'.
		"""
		sources = []
		try:
			if not url:
				return sources
			query = urlparse.urljoin(self.base_link, url)
			r = self.scraper.get(query).content
			# Release name sits in the #release_text span; first token carries the quality.
			quality = dom_parser.parse_dom(r, 'span', attrs={'id': 'release_text'})[0].content.split(' ')[0]
			quality, info = source_utils.get_release_quality(quality)
			# FIX: attrs keys were scrape-corrupted to 'clast'; the HTML attribute is 'class'.
			r = dom_parser.parse_dom(r, 'ul', attrs={'class': 'currentStreamLinks'})
			r = [(dom_parser.parse_dom(i, 'p', attrs={'class': 'hostName'}),
			      dom_parser.parse_dom(i, 'a', attrs={'class': 'stream-src'}, req='data-id')) for i in r]
			r = [(re.sub(' hd$', '', i[0][0].content.lower()), [x.attrs['data-id'] for x in i[1]]) for i in r if
			     i[0] and i[1]]
			for hoster, id in r:
				valid, hoster = source_utils.is_host_valid(hoster, hostDict)
				if not valid: continue
				sources.append({'source': hoster, 'quality': quality, 'language': 'de',
				                'info': ' | '.join(info + ['' if len(id) == 1 else 'multi-part']), 'url': id,
				                'direct': False, 'debridonly': False, 'checkquality': True})
			return sources
		except:
			return sources

0 View Complete Implementation : iload.py
Copyright GNU General Public License v3.0
Author : a4k-openproject
	def sources(self, url, hostDict, hostprDict):
		"""Scrape German stream hosters from an iload page via its xrel release lists.

		Walks the page's release-list tables, filters to German stream releases,
		then extracts hoster links and derives quality from the xrel search query.
		Returns a list of source dicts; errors are swallowed (best-effort scraper).
		"""
		sources = []
		try:
			if not url:
				return sources
			query = urlparse.urljoin(self.base_link, url)
			r = self.scraper.get(query).content
			r = dom_parser.parse_dom(r, 'div', attrs={'id': 'Module'})
			r = [(r, dom_parser.parse_dom(r, 'a', attrs={'href': re.compile('[^\'"]*xrel_search_query[^\'"]*')},
			                              req='href'))]
			r = [(i[0], i[1][0].attrs['href'] if i[1] else '') for i in r]
			rels = dom_parser.parse_dom(r[0][0], 'a', attrs={'href': re.compile('[^\'"]*ReleaseList[^\'"]*')},
			                            req='href')
			if rels and len(rels) > 1:
				r = []
				for rel in rels:
					relData = self.scraper.get(urlparse.urljoin(self.base_link, rel.attrs['href'])).content
					# FIX: attrs values/keys were scrape-corrupted ('clast'); the HTML attribute is 'class'.
					relData = dom_parser.parse_dom(relData, 'table', attrs={'class': 'release-list'})
					relData = dom_parser.parse_dom(relData, 'tr', attrs={'class': 'row'})
					relData = [(dom_parser.parse_dom(i, 'td', attrs={'class': re.compile('[^\'"]*list-name[^\'"]*')}),
					            dom_parser.parse_dom(i, 'img', attrs={'class': 'countryflag'}, req='alt'),
					            dom_parser.parse_dom(i, 'td', attrs={'class': 'release-types'})) for i in relData]
					relData = [(i[0][0].content, i[1][0].attrs['alt'].lower(), i[2][0].content) for i in relData if
					           i[0] and i[1] and i[2]]
					# Keep only German releases (flag alt text).
					relData = [(i[0], i[2]) for i in relData if i[1] == 'deutsch']
					# Keep only releases marked as stream type.
					relData = [(i[0], dom_parser.parse_dom(i[1], 'img', attrs={'class': 'release-type-stream'})) for i
					           in relData]
					relData = [i[0] for i in relData if i[1]]
					# relData = dom_parser.parse_dom(relData, 'a', req='href')[:3]
					relData = dom_parser.parse_dom(relData, 'a', req='href')
					for i in relData:
						i = self.scraper.get(urlparse.urljoin(self.base_link, i.attrs['href'])).content
						i = dom_parser.parse_dom(i, 'div', attrs={'id': 'Module'})
						i = [(i, dom_parser.parse_dom(i, 'a',
						                              attrs={'href': re.compile('[^\'"]*xrel_search_query[^\'"]*')},
						                              req='href'))]
						r += [(x[0], x[1][0].attrs['href'] if x[1] else '') for x in i]
			r = [(dom_parser.parse_dom(i[0], 'div', attrs={'id': 'ModuleReleaseDownloads'}), i[1]) for i in r]
			r = [(dom_parser.parse_dom(i[0][0], 'a', attrs={'class': re.compile('.*-stream.*')}, req='href'), i[1]) for
			     i in r if len(i[0]) > 0]
			for items, rel in r:
				# The xrel search query doubles as the release name for quality detection.
				rel = urlparse.urlparse(rel).query
				rel = urlparse.parse_qs(rel)['xrel_search_query'][0]
				quality, info = source_utils.get_release_quality(rel)
				items = [(i.attrs['href'], i.content) for i in items]
				items = [(i[0], dom_parser.parse_dom(i[1], 'img', req='src')) for i in items]
				items = [(i[0], i[1][0].attrs['src']) for i in items if i[1]]
				# Hoster name is encoded in the button image filename.
				items = [(i[0], re.findall('.+/(.+\.\w+)\.\w+', i[1])) for i in items]
				items = [(i[0], i[1][0]) for i in items if i[1]]
				info = ' | '.join(info)
				for link, hoster in items:
					valid, hoster = source_utils.is_host_valid(hoster, hostDict)
					if not valid: continue
					sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': link, 'info': info,
					                'direct': False, 'debridonly': False, 'checkquality': True})
			return sources
		except:
			return sources

0 View Complete Implementation : movie2k.py
Copyright GNU General Public License v3.0
Author : a4k-openproject
	def sources(self, url, hostDict, hostprDict):
		"""Collect German hoster links from a movie2k detail page table.

		Returns a list of source dicts; errors are swallowed (best-effort scraper).
		"""
		sources = []
		try:
			if not url:
				return sources
			query = urlparse.urljoin(self.base_link, url)
			r = client.request(query)
			r = dom_parser.parse_dom(r, 'div', attrs={'id': 'tab-plot_german'})
			r = dom_parser.parse_dom(r, 'tbody')
			r = dom_parser.parse_dom(r, 'tr')
			for i in r:
				# FIX: the original ran the same re.search twice and raised
				# AttributeError on a non-match, aborting the WHOLE loop via the
				# outer except. Hoist the match and skip rows that don't fit.
				match = re.search('(?<=">)(\n.*?)(?=<\/a>)', i[1])
				if not match or not match.group().strip():
					continue
				hoster = match.group().strip()
				link = re.search('(?<=href=\")(.*?)(?=\")', i[1]).group()
				rel = re.search('(?<=oddCell qualityCell">)(\n.*?)(?=<\/td>)', i[1]).group().strip()
				quality, info = source_utils.get_release_quality(rel)
				if not quality:
					quality = 'SD'
				valid, hoster = source_utils.is_host_valid(hoster, hostDict)
				if not valid: continue
				sources.append(
					{'source': hoster, 'quality': quality, 'language': 'de', 'url': link, 'direct': False,
					 'debridonly': False})
			return sources
		except:
			return sources

0 View Complete Implementation : 123movieshubz.py
Copyright GNU General Public License v3.0
Author : a4k-openproject
	def sources(self, url, hostDict, hostprDict):
		"""Scrape hoster sources from a 123movieshub servers list.

		Returns a list of source dicts; errors are swallowed (best-effort scraper).
		"""
		try:
			sources = []
			hostDict = hostprDict + hostDict
			if url is None:
				return sources
			r = self.scraper.get(url).content
			# FIX: if the quality regex matched nothing, 'quality'/'info' were
			# unbound and the NameError aborted the whole scrape. Default to SD.
			quality, info = 'SD', []
			qual = re.findall(">(\w+)<\/p", r)
			for i in qual:
				quality, info = source_utils.get_release_quality(i, i)
			r = dom_parser.parse_dom(r, 'div', {'id': 'servers-list'})
			r = [dom_parser.parse_dom(i, 'a', req=['href']) for i in r if i]
			for i in r[0]:
				url = {'url': i.attrs['href'], 'data-film': i.attrs['data-film'], 'data-server': i.attrs['data-server'],
				       'data-name': i.attrs['data-name']}
				url = urllib.urlencode(url)
				valid, host = source_utils.is_host_valid(i.content, hostDict)
				if source_utils.limit_hosts() is True and host in str(sources):
					continue
				if valid:
					sources.append({'source': host, 'quality': quality, 'language': 'en', 'info': info, 'url': url,
					                'direct': False, 'debridonly': False})
			return sources
		except:
			return sources

0 View Complete Implementation : allucxyz.py
Copyright GNU General Public License v3.0
Author : a4k-openproject
	def sources(self, url, hostDict, hostprDict):
		"""Extract hoster embeds from an alluc page; gomostream links are delegated
		to more_sources.more_gomo for resolution."""
		try:
			sources = []
			if url is None:
				return sources
			hostDict = hostDict + hostprDict
			page = self.scraper.get(url).content
			iframe_links = re.compile('<iframe.+?src="(.+?)"', re.DOTALL).findall(page)
			for iframe in iframe_links:
				if "gomostream.com" in iframe:
					sources.extend(more_sources.more_gomo(iframe, hostDict))
				else:
					quality, info = source_utils.get_release_quality(iframe, iframe)
					valid, host = source_utils.is_host_valid(iframe, hostDict)
					sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': iframe,
					                'info': info, 'direct': False, 'debridonly': False})
			return sources
		except:
			return sources

0 View Complete Implementation : cmovieshd.py
Copyright GNU General Public License v3.0
Author : a4k-openproject
	def sources(self, url, hostDict, hostprDict):
		"""Scrape episode-embed hoster links from a cmovieshd watch page.

		Returns a list of source dicts; errors are swallowed (best-effort scraper).
		"""
		try:
			sources = []
			url = url + 'watch/'
			r = client.request(url)
			# FIX: the regex literal was scrape-corrupted to 'clast="quality"';
			# the page's HTML attribute is class="quality", so quality never matched.
			qual = re.compile('class="quality">(.+?)<').findall(r)
			# Fallback so an empty match list doesn't leave quality/info unbound.
			quality, info = 'SD', []
			for i in qual:
				quality, info = source_utils.get_release_quality(i, i)
			r = client.parseDOM(r, "div", attrs={"id": "list-eps"})
			for i in r:
				t = re.compile('<a href="(.+?)"').findall(i)
				for url in t:
					t = client.request(url)
					t = client.parseDOM(t, "div", attrs={"id": "content-embed"})
					for u in t:
						i = re.findall('src="(.+?)"', u)[0].replace('load_player.html?e=', 'episode/embed/')
						i = client.request(i).replace("\\", "")
						u = re.findall('"(https.+?)"', i)
						for url in u:
							valid, host = source_utils.is_host_valid(url, hostDict)
							sources.append(
								{'source': host, 'quality': quality, 'language': 'en', 'info': info, 'url': url,
								 'direct': False, 'debridonly': False})
			return sources
		except:
			return sources