Here are examples of the Python API django.http.StreamingHttpResponse taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
59 Examples
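All of the examples below follow the same basic pattern: pass an iterator (usually a generator) to StreamingHttpResponse instead of a fully rendered body, so Django sends each chunk to the client as it is produced. As a minimal sketch before the project examples (the view and generator names below are illustrative, not taken from any of the listed projects):

# Minimal illustrative sketch; csv_rows and export_view are hypothetical names.
from django.http import StreamingHttpResponse


def csv_rows():
    # Any iterable of str/bytes works; a generator keeps memory use flat.
    yield "id,name\n"
    for i in range(3):
        yield "%d,item-%d\n" % (i, i)


def export_view(request):
    response = StreamingHttpResponse(csv_rows(), content_type="text/csv")
    response['Content-Disposition'] = 'attachment; filename="items.csv"'
    return response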
3
View Complete Implementation : middleware.py
Copyright MIT License
Author : pirate
def early_preload_response(request, get_response, nonce):
    def generate_response():
        yield ''
        response = get_response(request)
        set_cached_response_type(request, response)
        yield response.content

    response = StreamingHttpResponse(generate_response())
    response['Link'] = create_preload_header(request.to_preload, nonce)
    response['X-HTTP2-PRELOAD'] = 'early'
    # print('SENDING EARLY PRELOAD REQUEST', request.path, response['Content-Type'])
    return response
3
View Complete Implementation : views.py
Copyright GNU General Public License v3.0
Author : texta-tk
def search(request):
    try:
        processed_request = RestProcessor().process_searcher(request)
    except Exception as processing_error:
        return StreamingHttpResponse([json.dumps({'error': str(processing_error)})])

    if "scroll" in processed_request or "scroll_id" in processed_request:
        return scroll(request)

    results = Searcher(es_url).search(processed_request)
    return StreamingHttpResponse(process_stream(results), content_type='application/json')
3
View Complete Implementation : v1.py
Copyright GNU Affero General Public License v3.0
Author : LexPredict
def _query_results_to_csv(query_results: DocumentQueryResults) -> StreamingHttpResponse:
    resp = StreamingHttpResponse(
        csv_gen(query_results.column_codes, query_results.fetch(), query_results.column_titles),
        content_type='text/csv')
    resp['Content-Disposition'] = 'attachment; filename="export.csv"'
    return resp
3
View Complete Implementation : tests.py
Copyright GNU Affero General Public License v3.0
Author : nesdis
def setUp(self):
    self.req = self.request_factory.get('/')
    self.req.META['HTTP_ACCEPT_ENCODING'] = 'gzip, deflate'
    self.req.META['HTTP_USER_AGENT'] = 'Mozilla/5.0 (Windows NT 5.1; rv:9.0.1) Gecko/20100101 Firefox/9.0.1'
    self.resp = HttpResponse()
    self.resp.status_code = 200
    self.resp.content = self.compressible_string
    self.resp['Content-Type'] = 'text/html; charset=UTF-8'
    self.stream_resp = StreamingHttpResponse(self.sequence)
    self.stream_resp['Content-Type'] = 'text/html; charset=UTF-8'
    self.stream_resp_unicode = StreamingHttpResponse(self.sequence_unicode)
    self.stream_resp_unicode['Content-Type'] = 'text/html; charset=UTF-8'
3
View Complete Implementation : views.py
Copyright MIT License
Author : MicroPyramid
@login_required
@condition(etag_func=None)
def container_stats(request, container_id):
    container = Container.objects.get_container(container_id, request.user)
    if container:
        return StreamingHttpResponse(stream_response_generator(container))
    return render(request, 'no_access.html')
@login_required
def csv_masks(request, hashfile_id):
    hashfile = get_object_or_404(Hashfile, id=hashfile_id)

    # didn't find the correct way in pure Django...
    rows = Hash.objects.raw("SELECT 1 AS id, MAX(password_mask) AS password_mask, COUNT(*) AS count FROM Hashcat_hash WHERE hashfile_id=%s AND password_mask IS NOT NULL GROUP BY password_mask ORDER BY count DESC", [hashfile.id])

    pseudo_buffer = Echo()
    writer = csv.writer(pseudo_buffer)
    response = StreamingHttpResponse((writer.writerow([item.password_mask, item.count]) for item in rows), content_type="text/csv")
    response['Content-Disposition'] = 'attachment; filename="masks.csv"'
    return response
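The Echo helper used by csv_masks above is not part of this snippet; it is the pseudo-buffer pattern from the Django CSV-streaming documentation, whose write() simply hands each formatted row back so the generator can yield it into the StreamingHttpResponse. A minimal sketch, assumed to match what this project uses:

class Echo:
    """Implements just the write() part of the file-like interface:
    csv.writer calls write(), and the row is returned unchanged so it can be
    yielded straight to the client instead of being buffered."""

    def write(self, value):
        return value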
def stream_file(file_path: str, logger: Any) -> Union[Response, StreamingHttpResponse]:
    filename = os.path.basename(file_path)
    chunk_size = 8192
    try:
        wrapped_file = FileWrapper(open(file_path, 'rb'), chunk_size)
        response = StreamingHttpResponse(wrapped_file,
                                         content_type=mimetypes.guess_type(file_path)[0])
        response['Content-Length'] = os.path.getsize(file_path)
        response['Content-Disposition'] = "attachment; filename={}".format(filename)
        return response
    except FileNotFoundError:
        logger.warning('File not found: file_path=%s', file_path)
        return Response(status=status.HTTP_404_NOT_FOUND,
                        data='File not found: file_path={}'.format(file_path))
    except OSError:
        return Response(
            status=status.HTTP_400_BAD_REQUEST,
            data='Could not get the file, an error was encountered.')
3
View Complete Implementation : views.py
Copyright BSD 3-Clause "New" or "Revised" License
Author : OasisLMF
def _handle_get_related_file(parent, field):
    f = getattr(parent, field)

    if not f:
        raise Http404()

    response = StreamingHttpResponse(_get_chunked_content(f.file), content_type=f.content_type)
    if f.filename:
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(f.filename)
    else:
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(f.file.name)
    return response
3
View Complete Implementation : views.py
Copyright GNU General Public License v2.0
Author : welliamcao
def downloads(self, request):
    try:
        sList, ANS = self.file_downloads(request)
    except Exception as ex:
        logger.error("Failed to download file: {ex}".format(ex=ex))
        return JsonResponse({'msg': "Work order does not exist", "code": 500, 'data': []})
    dest = os.getcwd() + '/upload/file/download/'
    module_args = "src={src} dest={dest}".format(src=request.POST.get('path'), dest=dest)
    ANS.run_model(host_list=sList, module_name='fetch', module_args=module_args)
    filesData = json.loads(ANS.get_model_result())
    filePath = filesData.get('success').get(request.POST.get('dest_host')).get('dest')
    if filePath:
        response = StreamingHttpResponse(base.file_iterator(filePath))
        response['Content-Type'] = 'application/octet-stream'
        response['Content-Disposition'] = 'attachment; filename="{file_name}"'.format(file_name=os.path.basename(filePath))
        return response
    return JsonResponse({'msg': "File does not exist", "code": 500, 'data': []})
@login_required
def export_cracked(request, hashfile_id):
    hashfile = get_object_or_404(Hashfile, id=hashfile_id)
    cracked_hashes = Hash.objects.filter(hashfile_id=hashfile.id, password__isnull=False)

    if hashfile.username_included:
        response = StreamingHttpResponse(("%s:%s\n" % (item.username, item.password) for item in cracked_hashes), content_type="text/txt")
    else:
        response = StreamingHttpResponse(("%s:%s\n" % (item.hash, item.password) for item in cracked_hashes), content_type="text/txt")
    response['Content-Disposition'] = 'attachment; filename="cracked.txt"'
    return response
@etag(lambda *args, **kwargs: MapUpdate.current_processed_cache_key())
@no_language()
def get_cache_package(request, filetype):
    enforce_tile_secret_auth(request)

    filename = os.path.join(settings.CACHE_ROOT, 'package.' + filetype)

    f = open(filename, 'rb')
    f.seek(0, os.SEEK_END)
    size = f.tell()
    f.seek(0)

    content_type = 'application/' + {'tar': 'x-tar', 'tar.gz': 'gzip', 'tar.xz': 'x-xz'}[filetype]

    response = StreamingHttpResponse(FileWrapper(f), content_type=content_type)
    response['Content-Length'] = size
    return response
@login_required
@view_permission_required(SparkJob)
def download_spark_job(request, id):
    """
    Download the notebook file for the scheduled Spark job with the given ID.
    """
    spark_job = SparkJob.objects.get(pk=id)
    response = StreamingHttpResponse(
        spark_job.notebook_s3_object["Body"].read().decode("utf-8"),
        content_type="application/x-ipynb+json",
    )
    response["Content-Disposition"] = "attachment; filename=%s" % get_valid_filename(
        spark_job.notebook_name
    )
    response["Content-Length"] = spark_job.notebook_s3_object["ContentLength"]
    return response
@login_required
def export_uncracked(request, hashfile_id):
    hashfile = get_object_or_404(Hashfile, id=hashfile_id)
    uncracked_hashes = Hash.objects.filter(hashfile_id=hashfile.id, password__isnull=True)

    if hashfile.username_included:
        response = StreamingHttpResponse(("%s:%s\n" % (item.username, item.hash) for item in uncracked_hashes), content_type="text/txt")
    else:
        response = StreamingHttpResponse(("%s\n" % (item.hash,) for item in uncracked_hashes), content_type="text/txt")
    response['Content-Disposition'] = 'attachment; filename="uncracked.txt"'
    return response
def serve(self, rendition):
    # Open and serve the file
    rendition.file.open('rb')
    image_format = imghdr.what(rendition.file)
    return StreamingHttpResponse(FileWrapper(rendition.file),
                                 content_type='image/' + image_format)
3
View Complete Implementation : views.py
Copyright MIT License
Author : meine-stadt-transparent
def file_serve_proxy(
    request: HttpRequest, original_file_id: int
) -> StreamingHttpResponse:
    """ Util to proxy back to the original RIS in case we don't want to download all the files """
    url = settings.PROXY_ONLY_TEMPLATE.format(original_file_id)
    response = requests_get(url, stream=True)
    return StreamingHttpResponse(
        response.iter_content(chunk_size=None), status=response.status_code
    )
def get_sample_as_raw_data(self, sample, sample_path, ending):
    try:
        response = StreamingHttpResponse(
            open(sample_path, 'rb'),
            content_type='application/octet-stream'
        )
    except FileNotFoundError:
        raise Http404
    response['Content-Disposition'] = "attachment; filename={}{}".format(sample.sha2, ending)
    response['Content-Length'] = os.stat(sample_path).st_size
    return response
3
View Complete Implementation : alcali.py
Copyright MIT License
Author : latenighttales
@api_view(["GET"])
@renderer_clastes([StreamingRenderer])
def event_stream(request):
# Web socket.
response = StreamingHttpResponse(
get_events(), status=200, content_type="text/event-stream"
)
response["Cache-Control"] = "no-cache"
return response
3
View Complete Implementation : learn.py
Copyright MIT License
Author : skorokithakis
@user_passes_test(lambda u: u.is_superuser)
def export_messages(request):
    """Export spam messages as a JSON object."""
    response = StreamingHttpResponse(
        message_exporter(), content_type="application/json"
    )
    response["Content-Disposition"] = 'attachment; filename="spamnesty_messages.json"'
    return response
3
View Complete Implementation : files.py
Copyright GNU Affero General Public License v3.0
Author : BirkbeckCTP
def serve_temp_file(file_path, file_name):
    filename, extension = os.path.splitext(file_name)
    mime_type = guess_mime(file_name)

    response = StreamingHttpResponse(FileWrapper(open(file_path, 'rb'), 8192), content_type=mime_type)
    response['Content-Length'] = os.path.getsize(file_path)
    response['Content-Disposition'] = 'attachment; filename="{0}{1}"'.format(slugify(filename), extension)

    unlink_temp_file(file_path)
    return response
3
View Complete Implementation : views.py
Copyright BSD 3-Clause "New" or "Revised" License
Author : elastic
def streaming_view(request):
    def my_generator():
        for i in range(5):
            with elasticapm.capture_span("iter", "code"):
                time.sleep(0.01)
                yield str(i)

    resp = StreamingHttpResponse(my_generator())
    return resp
3
View Complete Implementation : ui_Excel_data_to_db.py
Copyright MIT License
Author : LianjiaTech
def file_download(request):
    userDir = "%s/%s/" % (BASE_DIR.replace("\\", "/"), "ui_file_uploads")
    file_name = "%s%s/%s" % (userDir, request.GET.get("loginName"), request.GET.get("fileName"))

    def file_iterator(file_name, chunk_size=512):  # used to produce the binary chunks
        with open(file_name, 'rb') as f:
            while True:
                c = f.read(chunk_size)
                if c:
                    yield c
                else:
                    break

    the_file_name = file_name  # path of the file to download
    response = StreamingHttpResponse(file_iterator(the_file_name))  # build the response here
    response['Content-Type'] = 'application/vnd.ms-excel'  # note the content type
    print(request.GET.get("fileName"))
    response['Content-Disposition'] = 'attachment;filename="%s"' % request.GET.get("fileName")  # note: filename is the name the file gets after download
    return response
def salt_file_download(request, file_name):
    import sys
    reload(sys)
    sys.setdefaultencoding('utf-8')  # works around Chinese characters defaulting to ASCII encoding; ignore the "not found" hint, the attribute exists after reload

    def file_iterator(file, chunk_size=512):
        with open(file) as f:
            while True:
                c = f.read(chunk_size)
                if c:
                    yield c
                else:
                    break
        f.close()

    response = StreamingHttpResponse(file_iterator(file_name))
    # allow downloading files of any type
    response['Content-Type'] = 'application/octet-stream'
    response['Content-Disposition'] = 'attachment;filename="{0}"'.format(file_name)
    return response
3
View Complete Implementation : export_pages.py
Copyright GNU General Public License v3.0
Author : texta-tk
@login_required
def export_pages(request):
    es_params = request.session.get('export_args')

    if es_params is not None:
        if es_params['num_examples'] == '*':
            response = StreamingHttpResponse(get_all_rows(es_params, request), content_type='text/csv')
        else:
            response = StreamingHttpResponse(get_rows(es_params, request), content_type='text/csv')

        response['Content-Disposition'] = 'attachment; filename="%s"' % (es_params['filename'])
        return response

    logger = LogManager(__name__, 'SEARCH CORPUS')
    logger.set_context('user_name', request.user.username)
    logger.error('export pages failed, parameters empty')
    return HttpResponse()
3
View Complete Implementation : views.py
Copyright GNU General Public License v2.0
Author : welliamcao
def download_package(self, request):
    project = self.get_apps(request)
    task = self.get_task(request)
    if project and task:
        response = StreamingHttpResponse(base.file_iterator(task.package))
        response['Content-Type'] = 'application/octet-stream'
        response['Content-Disposition'] = 'attachment; filename="{file_name}"'.format(file_name=os.path.basename(task.package))
        return response
    else:
        return JsonResponse({'msg': "Project or task does not exist", "code": 500, 'data': []})
3
View Complete Implementation : views.py
Copyright GNU Affero General Public License v3.0
Author : danielquinn
def get(self, request, *args, **kwargs):
    self.archive = get_object_or_404(Archive, pk=kwargs.get("pk"))

    kind = kwargs.get("kind")
    if kind == "map":
        with lzma.open(self.archive.get_map_path()) as f:
            return StreamingHttpResponse(f.readlines())

    return StreamingHttpResponse(getattr(self.archive, kind))
3
View Complete Implementation : tests.py
Copyright GNU Affero General Public License v3.0
Author : nesdis
def setUp(self):
    self.req = RequestFactory().get('/')
    self.req.META['HTTP_ACCEPT_ENCODING'] = 'gzip, deflate'
    self.req.META['HTTP_USER_AGENT'] = 'Mozilla/5.0 (Windows NT 5.1; rv:9.0.1) Gecko/20100101 Firefox/9.0.1'
    self.resp = HttpResponse()
    self.resp.status_code = 200
    self.resp.content = self.compressible_string
    self.resp['Content-Type'] = 'text/html; charset=UTF-8'
    self.stream_resp = StreamingHttpResponse(self.sequence)
    self.stream_resp['Content-Type'] = 'text/html; charset=UTF-8'
    self.stream_resp_unicode = StreamingHttpResponse(self.sequence_unicode)
    self.stream_resp_unicode['Content-Type'] = 'text/html; charset=UTF-8'
0
View Complete Implementation : files.py
Copyright GNU Affero General Public License v3.0
Author : BirkbeckCTP
@cache_control(max_age=600)
def serve_file_to_browser(file_path, file_to_serve, public=False):
    """ Stream a file to the browser in a safe way

    :param file_path: the path on disk to the file
    :param file_to_serve: the core.models.File object to serve
    :param public: boolean
    :return: HttpStreamingResponse object
    """
    # stream the response to the browser
    # we use the UUID filename to avoid any security risks of putting user content in headers
    # we set a chunk size of 8192 so that the entire file isn't loaded into memory if it's large
    filename, extension = os.path.splitext(file_to_serve.original_filename)

    if file_to_serve.mime_type in IMAGE_MIMETYPES:
        response = HttpResponse(FileWrapper(open(file_path, 'rb'), 8192), content_type=file_to_serve.mime_type)
    else:
        response = StreamingHttpResponse(FileWrapper(open(file_path, 'rb'), 8192), content_type=file_to_serve.mime_type)

    response['Content-Length'] = os.path.getsize(file_path)
    if public:
        response['Content-Disposition'] = 'attachment; filename="{0}"'.format(file_to_serve.public_download_name())
    else:
        response['Content-Disposition'] = 'attachment; filename="{0}{1}"'.format(slugify(filename), extension)

    return response
0
View Complete Implementation : members.py
Copyright Apache License 2.0
Author : byro
def export_csv(self, header, data, csv_format="default"):
    class EchoBOM:
        """Dummy, based on the Django docs.

        This one adds one feature: It outputs a Unicode BOM (Byte-Order Mark) as the first
        character."""

        def write(self, value):
            if not hasattr(self, "have_bom"):
                self.have_bom = True
                return "\ufeff" + value
            else:
                return value

    def row_converter_de(row_):
        return {k: filter_excel_de(v) for (k, v) in row_.items()}

    pseudo_buffer = EchoBOM()
    writer = csv.DictWriter(
        pseudo_buffer,
        header.keys(),
        dialect={"csv_de": csv_excel_de}.get(csv_format, "excel"),
    )
    converter = {"csv_de": row_converter_de}.get(csv_format, lambda x: x)
    response = StreamingHttpResponse(
        (writer.writerow(converter(row)) for row in chain([header], data)),
        content_type="text/csv; charset=utf-8",
        charset="utf-8",
    )
    response[
        "Content-Disposition"
    ] = 'attachment; filename="members_{}.csv"'.format(now().date())
    return response
0
View Complete Implementation : views.py
Copyright GNU Affero General Public License v3.0
Author : CCA-Public
@login_required(login_url="/login/")
def download_dip(request, pk):
    dip = get_object_or_404(DIP, pk=pk)

    # Prioritize local copy
    if dip.objectszip:
        try:
            response = HttpResponse()
            response["Content-Length"] = dip.objectszip.size
            if zipfile.is_zipfile(dip.objectszip):
                response["Content-Type"] = "application/zip"
            else:
                response["Content-Type"] = "application/x-tar"
            response["Content-Disposition"] = (
                'attachment; filename="%s"' % dip.objectszip.name
            )
            response["X-Accel-Redirect"] = "/media/%s" % dip.objectszip.name
            return response
        except FileNotFoundError:
            raise Http404("DIP file not found.")

    # Proxy stream from the SS
    if dip.ss_host_url not in django_settings.SS_HOSTS.keys():
        raise RuntimeError("Configuration not found for SS host: %s" % dip.ss_host_url)
    headers = {
        "Authorization": "ApiKey %s:%s"
        % (
            django_settings.SS_HOSTS[dip.ss_host_url]["user"],
            django_settings.SS_HOSTS[dip.ss_host_url]["secret"],
        )
    }
    stream = requests.get(dip.ss_download_url, headers=headers, stream=True)
    if stream.status_code != requests.codes.ok:
        raise Http404("DIP file not found.")
    # So far, the SS only downloads DIPs as tar files
    response = StreamingHttpResponse(stream)
    response["Content-Type"] = stream.headers.get("Content-Type", "application/x-tar")
    response["Content-Disposition"] = stream.headers.get(
        "Content-Disposition", 'attachment; filename="%s.tar"' % dip.ss_dir_name
    )
    content_length = stream.headers.get("Content-Length")
    if content_length:
        response["Content-Length"] = content_length
    return response
0
View Complete Implementation : views.py
Copyright GNU General Public License v3.0
Author : clayball
def export(request, context):
    '''
    Looks at what info the user wants exported, and parses that out of
    'context'.
    Returns a response that creates a popup on the page asking the user
    to download a CSV file.
    Uses the Echo class (above) as a means of storing data in a buffer.
    '''
    existing_header_columns = []

    # Bools to keep track of what was selected.
    # Will be used for determining what gets outputted to csv file.
    sel_ip = sel_name = sel_ports = sel_os = sel_lsp = sel_groups \
        = sel_location = sel_tags = sel_notes = sel_status = False

    if 'ipv4_address' in context['checks']:
        existing_header_columns.append('IPv4')
        sel_ip = True
    if 'host_name' in context['checks']:
        existing_header_columns.append('Name')
        sel_name = True
    if 'ports' in context['checks']:
        existing_header_columns.append('Ports')
        sel_ports = True
    if 'os' in context['checks']:
        existing_header_columns.append('OS')
        sel_os = True
    if 'lsp' in context['checks']:
        existing_header_columns.append('LSP')
        sel_lsp = True
    if 'host_groups' in context['checks']:
        existing_header_columns.append('Host Groups')
        sel_groups = True
    if 'location' in context['checks']:
        existing_header_columns.append('Location')
        sel_location = True
    if 'tags' in context['checks']:
        existing_header_columns.append('Tags')
        sel_tags = True
    if 'notes' in context['checks']:
        existing_header_columns.append('Notes')
        sel_notes = True
    '''# Removed status
    if 'status' in context['checks']:
        existing_header_columns.append('Status')
        sel_status = True
    '''

    ipv4_addresses = [host.ipv4_address for host in context['host_list']]
    host_names = [host.host_name for host in context['host_list']]
    open_ports = context['open_ports']
    oses = [host.os for host in context['host_list']]
    lsps = [host.lsp for host in context['host_list']]
    host_groups = [host.host_groups for host in context['host_list']]
    locations = [host.location for host in context['host_list']]
    tags = [host.tags for host in context['host_list']]
    notes = [host.notes for host in context['host_list']]
    ''' # Removed status
    statuses = [host.status for host in context['host_list']]
    '''

    host_data = zip(ipv4_addresses, host_names, oses, lsps,
                    host_groups, locations, tags, notes)

    # NOTE: Lines with #new utilize a buffer and a stream.
    # They're great for dealing with large CSV files.
    # Lines with #old just write lines one-by-one into the file.
    pseudo_buffer = Echo()  #new
    writer = csv.writer(pseudo_buffer)  #new
    #response = HttpResponse(content_type='text/csv')  #old
    #writer = csv.writer(response)  #old

    output = []
    output.append(existing_header_columns)
    for ip, name, os, lsp, group, loc, tag, note in host_data:
        tmp = []
        if sel_ip:
            tmp.append(ip)
        if sel_name:
            tmp.append(name)
        if sel_ports:
            if ip in open_ports:
                tmp.append(open_ports[ip])
            else:
                tmp.append('')
        if sel_os:
            tmp.append(os)
        if sel_lsp:
            tmp.append(lsp)
        if sel_groups:
            tmp.append(group)
        if sel_location:
            tmp.append(loc)
        if sel_tags:
            tmp.append(tag)
        if sel_notes:
            tmp.append(note)
        ''' Removed status
        if sel_status:
            tmp.append(status)
        '''
        output.append(tmp)

    response = StreamingHttpResponse((writer.writerow(row) for row in output),
                                     content_type="text/csv")  #new
    #writer.writerows(output)  #old
    response['Content-Disposition'] = 'attachment; filename="export.csv"'  #new
    return response
0
View Complete Implementation : response.py
Copyright Mozilla Public License 2.0
Author : danpoland
def get_django_response(proxy_response):
    """
    Create an appropriate response based on the Content-Length of the proxy_response.
    If the content is bigger than MIN_STREAMING_LENGTH (defined in utils.py),
    a django.http.StreamingHttpResponse is created; otherwise a django.http.HttpResponse
    is created instead.

    :param proxy_response: an instance of urllib3.response.HTTPResponse used to build the response
    :returns: an appropriate response based on the proxy_response Content-Length
    """
    status = proxy_response.status
    headers = proxy_response.headers

    content_type = headers.get('Content-Type')

    logger.debug('Proxy response headers: %s', headers)
    logger.debug('Content-Type: %s', content_type)

    if should_stream(proxy_response):
        logger.info('Content-Length is bigger than %s', DEFAULT_AMT)
        response = StreamingHttpResponse(proxy_response.stream(DEFAULT_AMT), status=status, content_type=content_type)
    else:
        content = proxy_response.data or b''
        response = HttpResponse(content, status=status, content_type=content_type)

    logger.info('Normalizing response headers')
    set_response_headers(response, headers)

    logger.debug('Response headers: %s', getattr(response, '_headers'))

    cookies = proxy_response.headers.getlist('set-cookie')
    logger.info('Checking for invalid cookies')
    for cookie_string in cookies:
        cookie_dict = cookie_from_string(cookie_string)
        # if cookie is invalid cookie_dict will be None
        if cookie_dict:
            response.set_cookie(**cookie_dict)

    logger.debug('Response cookies: %s', response.cookies)
    return response
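The should_stream() helper called above is not included in this snippet. Going by the docstring (stream only when the body exceeds MIN_STREAMING_LENGTH), a plausible sketch looks like the following; the threshold value and the handling of a missing Content-Length header are assumptions, not the library's actual implementation:

MIN_STREAMING_LENGTH = 4 * 1024  # assumed value; per the docstring it lives in utils.py


def should_stream(proxy_response):
    """Stream when the upstream body is large or its size is unknown (assumed behaviour)."""
    content_length = proxy_response.headers.get('Content-Length')
    if content_length is None:
        return True
    try:
        return int(content_length) > MIN_STREAMING_LENGTH
    except ValueError:
        return True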
def get_sample_as_encrypted_zip(self, sample, sample_path, ending):
    # Copy sample to /tmp because pyminizip cannot rename the file after compression
    tmp_sample_path = os.path.join(tempfile.gettempdir(), sample.sha2 + ending)
    shutil.copyfile(sample_path, tmp_sample_path)
    with tempfile.NamedTemporaryFile() as tmp_zip_file:
        try:
            pyminizip.compress(tmp_sample_path, None, tmp_zip_file.name, SAMPLE_ZIP_PASSWORD, 0)
        except OSError:
            raise Http404
        response = StreamingHttpResponse(
            open(tmp_zip_file.name, 'rb'),
            content_type='application/zip'
        )
        response['Content-Disposition'] = "attachment; filename={}.zip".format(sample.sha2)
        response['Content-Length'] = os.stat(tmp_zip_file.name).st_size
        os.remove(tmp_sample_path)
        return response
0
View Complete Implementation : views.py
Copyright MIT License
Author : harvard-lil
def download_files(request, filepath=""):
    """
    If directory requested: show list of files inside dir
    If file requested: download file
    """
    absolute_path = download_files_storage.path(filepath)
    allow_downloads = "restricted" not in absolute_path or request.user.unlimited_access_in_effect()

    # file requested
    if download_files_storage.isfile(filepath):
        if not allow_downloads:
            context = {
                "filename": filepath,
                "error": "If you believe you should have access to this file, "
                         "please <a href='https://caselaw.freshdesk.com/support/tickets/new'>let us know</a>.",
                "title": "403 - Access to this file is restricted",
            }
            return render(request, "file_download_400.html", context, status=403)

        import magic
        mime = magic.Magic(mime=True)
        content_type = mime.from_file(absolute_path)
        chunk_size = 8192
        response = StreamingHttpResponse(FileWrapper(open(absolute_path, 'rb'), chunk_size), content_type=content_type)
        response['Content-Length'] = download_files_storage.getsize(absolute_path)
        response['Content-Disposition'] = 'attachment; filename="%s"' % filepath.split('/')[-1]
        return response

    # directory requested
    elif download_files_storage.isdir(filepath):
        # create clickable breadcrumbs
        breadcrumb_parts = filepath.split('/')
        breadcrumbs = []
        for idx, breadcrumb in enumerate(breadcrumb_parts):
            if breadcrumb:
                breadcrumbs.append({'name': breadcrumb,
                                    'path': "/".join(breadcrumb_parts[0:idx + 1])})
        readme = ""
        files = []
        for filename in list(download_files_storage.iter_files(filepath)):
            if "README.md" in filename:
                with open(download_files_storage.path(filename), "r") as f:
                    readme_content = f.read()
                    readme, toc, meta = render_markdown(readme_content)
            fileobject = {
                "name": filename.split('/')[-1],
                "path": filename,
                "is_dir": download_files_storage.isdir(filename),
                "size": download_files_storage.getsize(filename)
            }
            files.append(fileobject)

        # if we're in the root folder, also add a manifest.csv
        if filepath == "":
            files.append({
                "name": "manifest.csv",
                "path": "manifest.csv",
                "is_dir": False,
            })

        # sort files alphabetically
        files = sorted(files, key=lambda x: x["name"].lower())

        context = {
            'files': files,
            'allow_downloads': allow_downloads,
        }
        if len(breadcrumbs) > 0:
            # Add home path to root folder if breadcrumbs exist
            context['breadcrumbs'] = [{'name': 'home', 'path': ''}] + breadcrumbs
        if readme:
            context['readme'] = mark_safe(readme)

        return render(request, "file_download.html", context)

    # path does not exist
    else:
        context = {
            "title": "404 - File not found",
            "error": "This file was not found in our system."
        }
        return render(request, "file_download_400.html", context, status=404)
0
View Complete Implementation : views.py
Copyright MIT License
Author : httprunner
@login_check
def download_report(request, id):
    if request.method == 'GET':
        summary = TestReports.objects.get(id=id)
        reports = summary.reports
        start_at = summary.start_at

        if os.path.exists(os.path.join(os.getcwd(), "reports")):
            shutil.rmtree(os.path.join(os.getcwd(), "reports"))
        os.makedirs(os.path.join(os.getcwd(), "reports"))

        report_path = os.path.join(os.getcwd(), "reports{}{}.html".format(separator, start_at.replace(":", "-")))
        with open(report_path, 'w+', encoding='utf-8') as stream:
            stream.write(reports)

        def file_iterator(file_name, chunk_size=512):
            with open(file_name, encoding='utf-8') as f:
                while True:
                    c = f.read(chunk_size)
                    if c:
                        yield c
                    else:
                        break

        response = StreamingHttpResponse(file_iterator(report_path))
        response['Content-Type'] = 'application/octet-stream'
        response['Content-Disposition'] = 'attachment;filename="{0}"'.format(start_at.replace(":", "-") + '.html')
        return response
0
View Complete Implementation : views.py
Copyright MIT License
Author : hujingguang
@login_required(login_url='/')
def download_file(request):
    global ALLOW_DOWNLOAD_DIR
    form = DownloadFileForm()
    if request.method == 'POST':
        form = DownloadFileForm(request.POST)
        if form.is_valid():
            saltapi = SaltByLocalApi('/etc/salt/master')
            target = form.cleaned_data['target'].replace(' ', '')
            file_path = form.cleaned_data['file_path'].replace(' ', '')
            if file_path.endswith('/'):
                file_path = file_path[:len(file_path) - 1]
            file_split = os.path.split(file_path)
            file_parent_dir = file_split[0]
            default_dir = '/var/cache/salt/master/minions'
            output = saltapi.client.cmd(target, 'cp.push', [file_path])
            if target not in saltapi.connected_minions_list:
                form.errors['target'] = u'Invalid host name!!!'
                return render_to_response('download_file.html', RequestContext(request, {'form': form}))
            output = saltapi.client.cmd(target, 'cp.push', [file_path])
            local_absolute_path = default_dir + '/' + target + '/files/' + file_path
            if not output.get(target, False):
                form.errors['file_path'] = u'The file does not exist or the path is a directory!!'
            elif not os.path.exists(local_absolute_path):
                form.errors['file_path'] = u'Download failed!!!'
            else:
                flag = False
                for allow_dir in ALLOW_DOWNLOAD_DIR:
                    if file_path.startswith(allow_dir):
                        flag = True
                        break
                if not request.user.is_superuser:
                    if not flag:
                        form.errors['file_path'] = u'Non-administrators cannot download this file!!'
                        return render_to_response('download_file.html', RequestContext(request, {'form': form}))

                def file_iterator(local_file, chunk_size=512):
                    with open(local_file) as f:
                        while True:
                            c = f.read(chunk_size)
                            if c:
                                yield c
                            else:
                                break

                file_name = file_split[1]
                response = StreamingHttpResponse(file_iterator(local_absolute_path))
                response['Content-Type'] = 'application/octet-stream'
                response['Content-Disposition'] = 'attachment;filename="{0}"'.format(file_name)
                return response
            return render_to_response('download_file.html', RequestContext(request, {'form': form}))
            #return render_to_response('download_file.html', RequestContext(request, {'form': form, 'file': file_path}))
    return render_to_response('download_file.html', RequestContext(request, {'form': form}))
0
View Complete Implementation : views.py
Copyright Apache License 2.0
Author : jimmy201602
def _do_read(request, cache_key):
    pending_read_request.set()

    def content():
        with sockets_lock:
            client = sockets[cache_key]

        with read_lock:
            pending_read_request.clear()

            while True:
                instruction = client.receive()
                if instruction:
                    yield instruction
                else:
                    break

                if pending_read_request.is_set():
                    logger.info('Letting another request take over.')
                    break

            # End-of-instruction marker
            yield '0.;'

    response = StreamingHttpResponse(content(),
                                     content_type='application/octet-stream')
    response['Cache-Control'] = 'no-cache'
    return response
0
View Complete Implementation : views.py
Copyright GNU General Public License v3.0
Author : jimmy201602
def render_to_response(self, context, **kwargs):
    """
    It returns a json-encoded response, unless it was otherwise requested
    by the command operation
    """
    kwargs = {}
    additional_headers = {}
    # create response headers
    if 'header' in context:
        for key in context['header']:
            if key == 'Content-Type':
                kwargs['content_type'] = context['header'][key]
            elif key.lower() == 'status':
                kwargs['status'] = context['header'][key]
            else:
                additional_headers[key] = context['header'][key]
        del context['header']

    # return json if not header
    if not 'content_type' in kwargs:
        kwargs['content_type'] = 'application/json'

    if 'pointer' in context:  # return file
        if 'storage' in context['volume']._options.keys() and isinstance(context['volume']._options['storage'], SFTPStorage):
            # stream sftp file download
            def file_iterator(file_name, chunk_size=32768):
                while True:
                    c = file_name.read(chunk_size)
                    if c:
                        yield c
                    else:
                        context['volume'].close(
                            context['pointer'], context['info']['hash'])
                        # fix sftp open transfer not close session bug
                        if 'storage' in context['volume']._options.keys() and isinstance(context['volume']._options['storage'], SFTPStorage):
                            context['volume']._options['storage'].sftp.close()
                        break

            the_file_name = additional_headers["Content-Location"]
            if isinstance(the_file_name, bytes):
                the_file_name = the_file_name.decode()
            response = StreamingHttpResponse(
                file_iterator(context['pointer']))
            response['Content-Type'] = 'application/octet-stream'
            response['Content-Disposition'] = 'attachment;filename="{0}"'.format(
                the_file_name)
            return response
        else:
            context['pointer'].seek(0)
            kwargs['content'] = context['pointer'].read()
            context['volume'].close(
                context['pointer'], context['info']['hash'])
    # raw error, return only the error list
    elif 'raw' in context and context['raw'] and 'error' in context and context['error']:
        kwargs['content'] = context['error']
    elif kwargs['content_type'] == 'application/json':  # return json
        kwargs['content'] = json.dumps(context)
    else:  # return context as is!
        kwargs['content'] = context

    response = HttpResponse(**kwargs)
    for key, value in additional_headers.items():
        response[key] = value
    return response
0
View Complete Implementation : ui_show.py
Copyright MIT License
Author : LianjiaTech
def uiShowProgressing(request):
    taskExecId = request.GET.get("id", "0")
    context = {}
    resp = StreamingHttpResponse(stream_response_generator(taskExecId,))
    return resp
0
View Complete Implementation : views.py
Copyright MIT License
Author : MicroPyramid
@login_required
@condition(etag_func=None)
def host_stats(request):
    return StreamingHttpResponse(stream_host_stats())
def view_logfile(self, request, object_id):
    # NOTE:
    # starting from Django 1.10, a specific FileResponse will be available:
    # https://docs.djangoproject.com/en/1.10/ref/request-response/#fileresponse-objects
    obj = get_object_by_uuid_or_404(self.model, object_id)
    filename = obj._logfile()

    # wrapper = FileWrapper(open(filename, 'r'))
    # response = HttpResponse(wrapper, content_type='text/plain')

    # Here is another approach, which will stream your file in chunks without loading it in memory.
    # https://stackoverflow.com/questions/8600843/serving-large-files-with-high-loads-in-django#8601118
    chunk_size = 8192
    response = StreamingHttpResponse(
        FileWrapper(open(filename, 'rb'), chunk_size),
        content_type=mimetypes.guess_type(filename)[0])
    response['Content-Disposition'] = 'attachment; filename=%s' % os.path.basename(filename)
    response['Content-Length'] = os.path.getsize(filename)
    return response
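As the NOTE above says, newer Django versions provide FileResponse, a StreamingHttpResponse subclass that wraps the file and sets Content-Length itself; on Django 2.0+ it can also set Content-Disposition via as_attachment/filename. A rough sketch of the same view using it, keeping the helper names from above:

from django.http import FileResponse


def view_logfile(self, request, object_id):
    obj = get_object_by_uuid_or_404(self.model, object_id)  # same helper as above
    filename = obj._logfile()
    # FileResponse streams in chunks and sets Content-Length/Content-Disposition for us.
    return FileResponse(open(filename, 'rb'), as_attachment=True,
                        filename=os.path.basename(filename))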
0
View Complete Implementation : admin.py
Copyright GNU Affero General Public License v3.0
Author : nesdis
def download(modeladmin, request, selected):
    buf = StringIO('This is the content of the file')
    return StreamingHttpResponse(FileWrapper(buf))
0
View Complete Implementation : views.py
Copyright GNU Affero General Public License v3.0
Author : nesdis
def streaming(request):
    return StreamingHttpResponse([b"streaming", b" ", b"content"])
0
View Complete Implementation : tests.py
Copyright GNU Affero General Public License v3.0
Author : nesdis
def test_streaming_response(self):
    r = StreamingHttpResponse(iter(['hello', 'world']))

    # iterating over the response itself yields bytestring chunks.
    chunks = list(r)
    self.assertEqual(chunks, [b'hello', b'world'])
    for chunk in chunks:
        self.assertIsInstance(chunk, bytes)

    # and the response can only be iterated once.
    self.assertEqual(list(r), [])

    # even when a sequence that can be iterated many times, like a list,
    # is given as content.
    r = StreamingHttpResponse(['abc', 'def'])
    self.assertEqual(list(r), [b'abc', b'def'])
    self.assertEqual(list(r), [])

    # iterating over strings still yields bytestring chunks.
    r.streaming_content = iter(['hello', 'café'])
    chunks = list(r)
    # '\xc3\xa9' == unichr(233).encode()
    self.assertEqual(chunks, [b'hello', b'caf\xc3\xa9'])
    for chunk in chunks:
        self.assertIsInstance(chunk, bytes)

    # streaming responses don't have a `content` attribute.
    self.assertFalse(hasattr(r, 'content'))

    # and you can't accidentally assign to a `content` attribute.
    with self.assertRaises(AttributeError):
        r.content = 'xyz'

    # but they do have a `streaming_content` attribute.
    self.assertTrue(hasattr(r, 'streaming_content'))

    # that exists so we can check if a response is streaming, and wrap or
    # replace the content iterator.
    r.streaming_content = iter(['abc', 'def'])
    r.streaming_content = (chunk.upper() for chunk in r.streaming_content)
    self.assertEqual(list(r), [b'ABC', b'DEF'])

    # coercing a streaming response to bytes doesn't return a complete HTTP
    # message like a regular response does. it only gives us the headers.
    r = StreamingHttpResponse(iter(['hello', 'world']))
    self.assertEqual(bytes(r), b'Content-Type: text/html; charset=utf-8')

    # and this won't consume its content.
    self.assertEqual(list(r), [b'hello', b'world'])

    # additional content cannot be written to the response.
    r = StreamingHttpResponse(iter(['hello', 'world']))
    with self.assertRaises(Exception):
        r.write('!')

    # and we can't tell the current position.
    with self.assertRaises(Exception):
        r.tell()

    r = StreamingHttpResponse(iter(['hello', 'world']))
    self.assertEqual(r.getvalue(), b'helloworld')
0
View Complete Implementation : tests.py
Copyright GNU Affero General Public License v3.0
Author : nesdis
def test_streaming_response(self):
    filename = os.path.join(os.path.dirname(__file__), 'abc.txt')

    # file isn't closed until we close the response.
    file1 = open(filename)
    r = StreamingHttpResponse(file1)
    self.assertFalse(file1.closed)
    r.close()
    self.assertTrue(file1.closed)

    # when multiple files are assigned as content, make sure they are all
    # closed with the response.
    file1 = open(filename)
    file2 = open(filename)
    r = StreamingHttpResponse(file1)
    r.streaming_content = file2
    self.assertFalse(file1.closed)
    self.assertFalse(file2.closed)
    r.close()
    self.assertTrue(file1.closed)
    self.assertTrue(file2.closed)
0
View Complete Implementation : tests.py
Copyright GNU Affero General Public License v3.0
Author : nesdis
def test_content_length_header_not_added_for_streaming_response(self):
    response = StreamingHttpResponse('content')
    self.assertNotIn('Content-Length', response)
    response = CommonMiddleware().process_response(HttpRequest(), response)
    self.assertNotIn('Content-Length', response)
0
View Complete Implementation : fieldsight_logger_tools.py
Copyright BSD 2-Clause "Simplified" License
Author : awemulya
def response_with_mimetype_and_name(
        mimetype, name, extension=None, show_date=True, file_path=None,
        use_local_filesystem=False, full_mime=False):
    if extension is None:
        extension = mimetype
    if not full_mime:
        mimetype = "application/%s" % mimetype
    if file_path:
        try:
            if not use_local_filesystem:
                default_storage = get_storage_class()()
                wrapper = FileWrapper(default_storage.open(file_path))
                response = StreamingHttpResponse(wrapper,
                                                 content_type=mimetype)
                response['Content-Length'] = default_storage.size(file_path)
            else:
                wrapper = FileWrapper(open(file_path))
                response = StreamingHttpResponse(wrapper,
                                                 content_type=mimetype)
                response['Content-Length'] = os.path.getsize(file_path)
        except IOError:
            response = HttpResponseNotFound(
                _(u"The requested file could not be found."))
    else:
        response = HttpResponse(content_type=mimetype)
    response['Content-Disposition'] = disposition_ext_and_date(
        name, extension, show_date)
    return response
@swagger_auto_schema(
    operation_description=(
        'Get GeoJSON (see https://tools.ietf.org/html/rfc7946) dump of aquifers.'),
    method='get',
    manual_parameters=GEO_JSON_PARAMS,
    responses={
        302: openapi.Response(GEO_JSON_302_MESSAGE),
        200: openapi.Response(
            'GeoJSON data for aquifers.',
            get_geojson_schema(AQUIFER_PROPERTIES, 'Polygon'))
    })
@api_view(['GET'])
def aquifer_geojson(request, **kwargs):
    realtime = request.GET.get('realtime') in ('True', 'true')
    if realtime:
        sw_long = request.query_params.get('sw_long')
        sw_lat = request.query_params.get('sw_lat')
        ne_long = request.query_params.get('ne_long')
        ne_lat = request.query_params.get('ne_lat')

        if sw_long and sw_lat and ne_long and ne_lat:
            bounds_sql = 'and geom @ ST_Transform(ST_MakeEnvelope(%s, %s, %s, %s, 4326), 3005)'
            bounds = (sw_long, sw_lat, ne_long, ne_lat)
        else:
            bounds = None
            bounds_sql = ''

        iterator = GeoJSONIterator(AQUIFERS_SQL.format(bounds=bounds_sql),
                                   AQUIFER_CHUNK_SIZE,
                                   connection.cursor(),
                                   bounds)

        response = StreamingHttpResponse((item for item in iterator),
                                         content_type='application/json')
        response['Content-Disposition'] = 'attachment; filename="aquifers.json"'
        return response
    else:
        # Generating spatial data realtime is much too slow,
        # so we have to redirect to a pre-generated instance.
        url = 'https://{}/{}/{}'.format(
            get_env_variable('S3_HOST'),
            get_env_variable('S3_WELL_EXPORT_BUCKET'),
            'api/v1/gis/aquifers.json')
        return HttpResponseRedirect(url)
@api_view(['GET'])
@cache_page(60 * 15)
def aquifer_geojson_simplified(request, **kwargs):
    """
    Sadly, GeoDjango's ORM doesn't seem to directly support a call to
    ST_AsGEOJSON, but the latter performs much better than processing WKT
    in Python, so we must generate SQL here.
    """
    SQL = """
    SELECT
        ST_AsGeoJSON(geom_simplified, 8) :: json AS "geometry",
        aquifer.aquifer_id AS id
    FROM aquifer;
    """

    iterator = GeoJSONIterator(
        SQL,
        AQUIFER_CHUNK_SIZE,
        connection.cursor())

    response = StreamingHttpResponse(
        (item for item in iterator),
        content_type='application/json')
    return response
@swagger_auto_schema(
    operation_description=('Get GeoJSON (see https://tools.ietf.org/html/rfc7946) dump of wells.'),
    method='get',
    manual_parameters=GEO_JSON_PARAMS,
    responses={
        302: openapi.Response(GEO_JSON_302_MESSAGE),
        200: openapi.Response(
            'GeoJSON data for well.',
            get_geojson_schema(WELL_PROPERTIES, 'Point'))
    }
)
@api_view(['GET'])
def well_geojson(request, **kwargs):
    realtime = request.GET.get('realtime') in ('True', 'true')
    if realtime:
        sw_long = request.query_params.get('sw_long')
        sw_lat = request.query_params.get('sw_lat')
        ne_long = request.query_params.get('ne_long')
        ne_lat = request.query_params.get('ne_lat')

        bounds = None
        bounds_sql = ''
        if sw_long and sw_lat and ne_long and ne_lat:
            bounds_sql = 'and geom @ ST_MakeEnvelope(%s, %s, %s, %s, 4326)'
            bounds = (sw_long, sw_lat, ne_long, ne_lat)

        iterator = GeoJSONIterator(
            WELLS_SQL.format(bounds=bounds_sql), WELL_CHUNK_SIZE, connection.cursor(), bounds)

        response = StreamingHttpResponse((item for item in iterator),
                                         content_type='application/json')
        response['Content-Disposition'] = 'attachment; filename="well.json"'
        return response
    else:
        # Generating spatial data realtime is much too slow,
        # so we have to redirect to a pre-generated instance.
        url = 'https://{}/{}/{}'.format(
            get_env_variable('S3_HOST'),
            get_env_variable('S3_WELL_EXPORT_BUCKET'),
            'api/v1/gis/wells.json')
        return HttpResponseRedirect(url)
@swagger_auto_schema(
    operation_description=('Get GeoJSON (see https://tools.ietf.org/html/rfc7946) dump of well '
                           'lithology.'),
    method='get',
    manual_parameters=GEO_JSON_PARAMS,
    responses={
        302: openapi.Response(GEO_JSON_302_MESSAGE),
        200: openapi.Response(
            'GeoJSON data for well lithology.',
            get_geojson_schema(LITHOLOGY_PROPERTIES, 'Point'))
    }
)
@api_view(['GET'])
def lithology_geojson(request, **kwargs):
    realtime = request.GET.get('realtime') in ('True', 'true')
    if realtime:
        sw_long = request.query_params.get('sw_long')
        sw_lat = request.query_params.get('sw_lat')
        ne_long = request.query_params.get('ne_long')
        ne_lat = request.query_params.get('ne_lat')

        bounds = None
        bounds_sql = ''
        if sw_long and sw_lat and ne_long and ne_lat:
            bounds_sql = 'and geom @ ST_MakeEnvelope(%s, %s, %s, %s, 4326)'
            bounds = (sw_long, sw_lat, ne_long, ne_lat)

        iterator = GeoJSONIterator(
            LITHOLOGY_SQL.format(bounds=bounds_sql), LITHOLOGY_CHUNK_SIZE, connection.cursor(), bounds)

        response = StreamingHttpResponse((item for item in iterator),
                                         content_type='application/json')
        response['Content-Disposition'] = 'attachment; filename="lithology.json"'
        return response
    else:
        # Generating spatial data realtime is much too slow,
        # so we have to redirect to a pre-generated instance.
        url = 'https://{}/{}/{}'.format(
            get_env_variable('S3_HOST'),
            get_env_variable('S3_WELL_EXPORT_BUCKET'),
            'api/v1/gis/lithology.json')
        return HttpResponseRedirect(url)