django.db.close_old_connections - Python examples

Here are examples of the Python API django.db.close_old_connections, taken from open source projects.
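
Before the examples, here is a minimal sketch of the pattern most of them share: code running outside the request/response cycle (threads, workers, channel consumers) calls close_old_connections() around each unit of work, so that connections past their CONN_MAX_AGE, or broken by the server, are discarded and transparently reopened on the next query. The work_queue and process_item names are placeholders, not taken from any example below.

import queue
import threading

from django.db import close_old_connections

work_queue = queue.Queue()

def process_item(item):
    """Placeholder for real ORM work."""

def worker_loop():
    # Django only does this bookkeeping automatically per HTTP request,
    # so a long-running background thread must do it itself.
    while True:
        item = work_queue.get()
        close_old_connections()  # drop expired or broken connections before querying
        try:
            process_item(item)
        finally:
            close_old_connections()  # avoid holding an idle connection while blocked

threading.Thread(target=worker_loop, daemon=True).start()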

22 Examples

Example: routing.py (MIT License, Author: aduranil)
    def __call__(self, scope):
        try:
            token_key = scope["query_string"].decode().split("=")[1]
            if token_key:
                token = Token.objects.get(key=token_key)
                scope["user"] = token.user
                close_old_connections()
        except Token.DoesNotExist:
            scope["user"] = AnonymousUser()
        return self.inner(scope)
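
A hypothetical wiring sketch for a Channels 2 middleware like the one above. The class name TokenAuthMiddleware and the websocket_urlpatterns list are assumptions (the excerpt only shows __call__):

from channels.routing import ProtocolTypeRouter, URLRouter

application = ProtocolTypeRouter({
    # every websocket handshake passes through the token middleware first
    "websocket": TokenAuthMiddleware(URLRouter(websocket_urlpatterns)),
})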

Example: commons.py (MIT License, Author: cuda-networks)
@contextmanager
def django_db_management():
    # type: () -> None
    reset_queries()
    close_old_connections()
    try:
        yield
    finally:
        close_old_connections()
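
A hypothetical usage of the context manager above (MyModel is a placeholder): work inside the block starts with fresh query logs and vetted connections, and any expired connection is closed on exit.

with django_db_management():
    # runs on a connection checked on entry; stale ones are closed afterwards
    MyModel.objects.count()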

Example: base.py (Apache License 2.0, Author: edisonlz)
    def execute(self, query, args=None):
        try:
            # args is None means no string interpolation
            # sql_statment = str(query).lower()
            # c_time = datetime.datetime.now().strftime('%y-%m-%d %H:%M:%S')
            # if 'select' not in sql_statment and 'homepublishedvideo' in sql_statment:
            #     print 'search_sql', c_time, query, args
            return self.cursor.execute(query, args)
        except Database.OperationalError as e:
            # CR_SERVER_GONE_ERROR, CR_SERVER_LOST
            if e.args[0] in (2006, 2013):
                from django.db import close_old_connections
                close_old_connections()
            # Map some error codes to IntegrityError, since they seem to be
            # misclassified and Django would prefer the more logical place.
            if e.args[0] in self.codes_for_integrityerror:
                six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
            raise

Example: base.py (Apache License 2.0, Author: edisonlz)
    def executemany(self, query, args):
        try:
            return self.cursor.executemany(query, args)
        except Database.OperationalError as e:
            if e.args[0] in (2006, 2013):
                from django.db import close_old_connections
                close_old_connections()
            # Map some error codes to IntegrityError, since they seem to be
            # misclassified and Django would prefer the more logical place.
            if e.args[0] in self.codes_for_integrityerror:
                six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
            raise

Example: monitor.py (Apache License 2.0, Author: erigones)
def que_monitor_loop(server, worker):
    log = worker.log

    while True:
        try:
            que_monitor(cq, _info=log.info, _debug=log.debug)
        except OPERATIONAL_ERRORS as ex:
            log.exception(ex)
            log.critical('Dedicated que event monitor terminated. Closing DB connection and restarting in 1 second...')
            from django import db
            db.close_old_connections()
        except Exception as ex:
            log.exception(ex)
            log.critical('Dedicated que event monitor terminated. Restarting in 5 seconds...')
            sleep(5)

Example: namespaces.py (Apache License 2.0, Author: erigones)
    def exception_handler_decorator(self, fun):
        """Close DB connection here - https://github.com/abourget/gevent-socketio/issues/174"""
        def wrap(*args, **kwargs):
            self.log('APINamespace.%s(%s, %s)', fun.__name__, args, kwargs, level=DEBUG)
            try:
                return fun(*args, **kwargs)
            finally:
                close_old_connections()
        return wrap

Example: views.py (Apache License 2.0, Author: erigones)
def socketio(request):
    """
    Starting socket.io connection here.
    """
    if request.user.is_authenticated():
        if 'socketio' in request.environ:
            socketio_manage(request.environ, namespaces={'': APINamespace}, request=request)
            try:
                return HttpResponse(None)
            finally:
                close_old_connections()
        else:
            return HttpResponse(None, status=204)
    else:
        raise PermissionDenied

Example: task_utils.py (GNU Affero General Public License v3.0, Author: LexPredict)
    @staticmethod
    def prepare_task_execution():
        """

        Clears old database connections, as required by the CONN_MAX_AGE option (database connection settings).

        """
        if not TaskUtils.is_celery_worker():
            return

        try:
            if TaskUtils.__connection_initialization_finished:
                close_old_connections()
            else:
                for conn in connections.all():
                    conn.close()
                    TaskUtils.__connection_initialization_finished = True
        except Exception:
            pass
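
For context, CONN_MAX_AGE is configured per database, and close_old_connections() is what enforces it by closing any connection that has exceeded its age limit or become unusable. A typical settings.py fragment, with illustrative values:

DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql",
        "NAME": "mydb",      # illustrative
        "CONN_MAX_AGE": 60,  # recycle connections after 60 seconds;
                             # 0 closes per request, None keeps them forever
    }
}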

Example: authentication.py (GNU Affero General Public License v3.0, Author: TheSpaghettiDetective)
    def __call__(self, scope):
        close_old_connections()

        headers = dict(scope['headers'])
        try:
            if b'authorization' in headers:
                token_name, token_key = headers[b'authorization'].decode().split()
                if token_name == 'bearer':
                    printer = Printer.objects.select_related('user').get(auth_token=token_key)
                    printer.is_authenticated = True   # To make Printer duck-quack as authenticated User in Django Channels
                    scope['user'] = printer
            elif scope['path'].startswith('/ws/shared/'):
                printer = SharedResource.objects.select_related('printer').get(share_token=scope['path'].split('/')[-2]).printer # scope['path'].split('/')[-2] is the share_token in uri
                printer.is_authenticated = True   # To make Printer duck-quack as authenticated User in Django Channels
                scope['user'] = printer
        except ObjectDoesNotExist:
            pass
        return self.inner(scope)

Example: graphql_ws_consumer.py (MIT License, Author: datadvance)
    async def _run_in_worker(self, func):
        """Run a function in a thread with an event loop.

        Run the `func` in a thread within a thread pool and wait for it
        to finish. Each such thread has an initialized event loop which is
        NOT running. So the `func` can use it, for example by running
        `asyncio.get_event_loop().run_until_complete(...)`. We also
        cleanup Django database connections when `func` finishes.

        Args:
            func: The function to run in a thread.

        Returns:
            The result of the `func` invocation.

        """
        # Assert we run in a proper thread.
        self._assert_thread()

        def thread_func():
            """Wrap the `func` to init eventloop and to cleanup."""
            # Create an eventloop if this worker thread does not have
            # one yet. Eventually each worker will have its own
            # eventloop that `func` can use.
            try:
                loop = asyncio.get_event_loop()
            except RuntimeError:
                loop = asyncio.new_event_loop()
                asyncio.set_event_loop(loop)

            # Run a given function in a thread.
            try:
                return func()
            finally:
                # The `func` can open the connections to the database
                # so let's close them. Django Channels does the same in
                # the `channels.db.database_sync_to_async`.
                django.db.close_old_connections()

        return await asyncio.get_running_loop().run_in_executor(
            self._workers, thread_func
        )
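
As the cleanup comment notes, Django Channels applies the same pattern in channels.db.database_sync_to_async. A minimal sketch of using it directly (the User count is a placeholder query):

from channels.db import database_sync_to_async
from django.contrib.auth.models import User

@database_sync_to_async
def count_users():
    # runs in a worker thread; Channels calls close_old_connections()
    # around the wrapped function
    return User.objects.count()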

Example: signals.py (Apache License 2.0, Author: edx)
@task_prerun.connect
def start_user_task(sender=None, **kwargs):  # pylint: disable=unused-argument
    """
    Update the status record when execution of a :py:class:`UserTaskMixin` begins.
    """
    try:
        current_connection = transaction.get_connection()
    except Exception:  # pylint: disable=broad-except
        current_connection = None

    # We may be running this task in the context of an atomic transaction.
    # If so, it's possible that the current transaction could be forced closed
    # due to this block of code, where the `AUTOCOMMIT` defined in settings
    # does not match the autocommit state of the current transaction:
    # https://github.com/django/django/blob/1.11.24/django/db/backends/base/base.py#L510-L512
    # Thus, we should only try to close old connections if we're not currently in an atomic block.
    if current_connection and (not current_connection.in_atomic_block):
        # This should close any obsolete connections,
        # forcing Django to grab a new connection the next time it makes a query.
        # See: https://github.com/django/django/blob/master/django/db/__init__.py#L55
        close_old_connections()

    if isinstance(sender, UserTaskMixin):
        sender.status.start()

Example: database.py (Apache License 2.0, Author: GoogleCloudPlatform)
    @contextlib.contextmanager
    def with_cloud_sql_proxy(self,
                             project_id: str,
                             instance_name: str,
                             cloud_sql_proxy_path: Optional[str] = None,
                             region: str = 'us-west1',
                             port: int = 5432):
        """A context manager to run and kill cloud sql proxy subprocesses.

        Used to provide secure access to your Cloud SQL Second Generation
        instances without having to whitelist IP addresses or configure SSL.
        For more information:
        https://cloud.google.com/sql/docs/postgres/sql-proxy

        Args:
            project_id: GCP project id.
            instance_name: Name of the Cloud SQL instance the cloud sql
                proxy targets.
            cloud_sql_proxy_path: The command to run your cloud sql proxy.
            region: The region the Cloud SQL instance is in.
            port: The port your Postgres database is using. By default it is
                5432.

        Yields:
            None

        Raises:
            DatabaseError: If cloud sql proxy fails to start within 60 seconds.
        """
        try:
            db.close_old_connections()
        except django.core.exceptions.ImproperlyConfigured:
            # The Django environment is not correctly setup. This might be
            # because we are calling Django management commands with subprocess
            # calls. In this case the subprocess we are calling will handle
            # closing of old connections.
            pass
        instance_connection_string = '{0}:{1}:{2}'.format(
            project_id, region, instance_name)
        instance_flag = '-instances={}=tcp:{}'.format(
            instance_connection_string, port)
        if cloud_sql_proxy_path is None:
            cloud_sql_proxy_path = shutil.which('cloud_sql_proxy')
            assert cloud_sql_proxy_path, 'could not find cloud_sql_proxy_path'
        process = popen_spawn.PopenSpawn([cloud_sql_proxy_path, instance_flag])
        try:
            # Make sure cloud sql proxy is started before doing the real work
            process.expect('Ready for new connections', timeout=60)
            yield
        except pexpect.exceptions.TIMEOUT:
            raise DatabaseError(
                ('Cloud SQL Proxy was unable to start after 60 seconds. Output '
                 'of cloud_sql_proxy: \n{}').format(process.before))
        except pexpect.exceptions.EOF:
            raise DatabaseError(
                ('Cloud SQL Proxy exited unexpectedly. Output of '
                 'cloud_sql_proxy: \n{}').format(process.before))
        finally:
            process.kill(signal.SIGTERM)

Example: execute_sql.py (Apache License 2.0, Author: hhyo)
def execute_callback(task):
    """异步任务的回调, 将结果填入数据库等等
    使用django-q的hook, 传入参数为整个task
    task.result 是真正的结果
    """
    # https://stackoverflow.com/questions/7835272/django-operationalerror-2006-mysql-server-has-gone-away
    if connection.connection and not connection.is_usable():
        close_old_connections()
    workflow_id = task.args[0]
    workflow = SqlWorkflow.objects.get(id=workflow_id)
    workflow.finish_time = task.stopped

    if not task.success:
        # On failure, the error stack trace is returned; build an error message
        workflow.status = 'workflow_exception'
        execute_result = ReviewSet(full_sql=workflow.sqlworkflowcontent.sql_content)
        execute_result.rows = [ReviewResult(
            stage='Execute failed',
            errlevel=2,
            stagestatus='异常终止',
            errormessage=task.result,
            sql=workflow.sqlworkflowcontent.sql_content)]
    elif task.result.warning or task.result.error:
        execute_result = task.result
        workflow.status = 'workflow_exception'
    else:
        execute_result = task.result
        workflow.status = 'workflow_finish'
    # Save the execution result
    workflow.sqlworkflowcontent.execute_result = execute_result.json()
    workflow.sqlworkflowcontent.save()
    workflow.save()

    # Add a workflow audit log entry
    audit_id = Audit.detail_by_workflow_id(workflow_id=workflow_id,
                                           workflow_type=WorkflowDict.workflow_type['sqlreview']).audit_id
    Audit.add_log(audit_id=audit_id,
                  operation_type=6,
                  operation_type_desc='执行结束',
                  operation_info='执行结果:{}'.format(workflow.get_status_display()),
                  operator='',
                  operator_display='系统'
                  )

    # Clear the instance resource cache after a DDL workflow finishes
    if workflow.syntax_type == 1:
        r = get_redis_connection("default")
        for key in r.scan_iter(match='*insRes*', count=2000):
            r.delete(key)

    # Send notifications
    notify_for_execute(workflow)

Example: testcase.py (GNU Affero General Public License v3.0, Author: maas)
    def cause_serialization_failure(self):
        """Trigger an honest, from the database, serialization failure."""
        # Helper to switch the transaction to SERIALIZABLE.
        def set_serializable():
            with connection.cursor() as cursor:
                cursor.execute("SET TRANSACTION ISOLATION LEVEL SERIALIZABLE")

        # Perform a conflicting update. This must run in a separate thread. It
        # also must begin after the beginning of the transaction in which we
        # will trigger a serialization failure AND commit before that other
        # transaction commits. This doesn't need to run with serializable
        # isolation.
        def do_conflicting_update():
            try:
                with transaction.atomic():
                    with connection.cursor() as cursor:
                        cursor.execute("UPDATE stest SET a = 2")
            finally:
                close_old_connections()

        def trigger_serialization_failure():
            # Fetch something first. This ensures that we're inside the
            # transaction, and that the database has a reference point for
            # calculating serialization failures.
            with connection.cursor() as cursor:
                cursor.execute("SELECT * FROM stest")
                cursor.fetchall()

            # Run do_conflicting_update() in a separate thread.
            thread = threading.Thread(target=do_conflicting_update)
            thread.start()
            thread.join()

            # Updating the same rows as do_conflicting_update() did will
            # trigger a serialization failure. We have to check the __cause__
            # to confirm the failure type as reported by PostgreSQL.
            with connection.cursor() as cursor:
                cursor.execute("UPDATE stest SET a = 4")

        if connection.in_atomic_block:
            # We're already in a transaction.
            set_serializable()
            trigger_serialization_failure()
        else:
            # Start a transaction in this thread.
            with transaction.atomic():
                set_serializable()
                trigger_serialization_failure()

Example: testcase.py (GNU Affero General Public License v3.0, Author: maas)
    def cause_unique_violation(self):
        """Trigger an honest, from the database, unique violation.

        This may appear needlessly elaborate, but it's for a good reason.
        Indexes in PostgreSQL are a bit weird; they don't fully support MVCC
        so it's possible for situations like the following:

          CREATE TABLE foo (id SERIAL PRIMARY KEY);
          -- Session A:
          BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ;
          INSERT INTO foo (id) VALUES (1);
          -- Session B:
          BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ;
          SELECT id FROM foo;  -- Nothing.
          INSERT INTO foo (id) VALUES (1);  -- Hangs.
          -- Session A:
          COMMIT;
          -- Session B:
          ERROR:  duplicate key value violates unique constraint "..."
          DETAIL:  Key (id)=(1) already exists.

        Two things to note:

          1. Session B hangs when there's a potential conflict on id's index.

          2. Session B fails with a duplicate key error.

        Both differ from expectations:

          1. I would expect the transaction to continue optimistically and
             only fail if session A commits.

          2. I would expect a serialisation failure instead.

        This method jumps through hoops to reproduce the situation above so
        that we're testing against PostgreSQL's exact behaviour as of today,
        not the behaviour that we observed at a single moment in time.
        PostgreSQL may change its behaviour in later versions and this test
        ought to tell us about it.

        """
        # Helper to switch the transaction to REPEATABLE READ.
        def set_repeatable_read():
            with connection.cursor() as cursor:
                cursor.execute(
                    "SET TRANSACTION ISOLATION LEVEL " "REPEATABLE READ"
                )

        # Both threads / database sessions will attempt to insert this.
        conflicting_value = next(self.conflicting_values)

        # Perform a conflicting insert. This must run in a separate thread. It
        # also must begin after the beginning of the transaction in which we
        # will trigger a unique violation AND commit before that other
        # transaction commits. This doesn't need to run with any special
        # isolation; it just needs to be in a transaction.
        def do_conflicting_insert():
            try:
                with transaction.atomic():
                    with connection.cursor() as cursor:
                        cursor.execute(
                            "INSERT INTO uvtest VALUES (%s)",
                            [conflicting_value],
                        )
            finally:
                close_old_connections()

        def trigger_unique_violation():
            # Fetch something first. This ensures that we're inside the
            # transaction, and so the database has a reference point for
            # repeatable reads.
            with connection.cursor() as cursor:
                cursor.execute(
                    "SELECT 1 FROM uvtest WHERE a = %s", [conflicting_value]
                )
                self.assertIsNone(
                    cursor.fetchone(),
                    (
                        "We've seen through PostgreSQL impenetrable transaction "
                        "isolation — or so we once thought — to witness a "
                        "conflicting value from another database session. "
                        "Needless to say, this requires investigation."
                    ),
                )

            # Run do_conflicting_insert() in a separate thread and wait for it
            # to commit and return.
            thread = threading.Thread(target=do_conflicting_insert)
            thread.start()
            thread.join()

            # Still no sign of that conflicting value from here.
            with connection.cursor() as cursor:
                cursor.execute(
                    "SELECT 1 FROM uvtest WHERE a = %s", [conflicting_value]
                )
                self.assertIsNone(
                    cursor.fetchone(),
                    (
                        "PostgreSQL, once thought of highly in transactional "
                        "circles, has dropped its kimono and disgraced itself "
                        "with its wanton exhibition of conflicting values from "
                        "another's session."
                    ),
                )

            # Inserting the same row will trigger a unique violation.
            with connection.cursor() as cursor:
                cursor.execute(
                    "INSERT INTO uvtest VALUES (%s)", [conflicting_value]
                )

        if connection.in_atomic_block:
            # We're already in a transaction.
            set_repeatable_read()
            trigger_unique_violation()
        else:
            # Start a transaction in this thread.
            with transaction.atomic():
                set_repeatable_read()
                trigger_unique_violation()

Example: cli.py (MIT License, Author: meine-stadt-transparent)
    def from_userinput(self, userinput: str, mirror: bool, ags: Optional[str]) -> None:
        body_id, entrypoint = self.get_entrypoint_and_body(userinput, mirror)
        importer = Importer(get_loader_from_system(entrypoint))
        body_data, dotenv = self.import_body_and_metadata(
            body_id, importer, userinput, ags
        )

        logger.info("Loading the bulk data from the oparl api")
        importer.fetch_lists_initial([body_data])

        # Also avoid "MySQL server has gone away" errors due to timeouts
        # https://stackoverflow.com/a/32720475/3549270
        db.close_old_connections()

        logger.info("Loading the data into the database")
        importer.import_objects()

        logger.info("Loading the files")
        importer.load_files(fallback_city=userinput)

        if dotenv:
            logger.info(
                "Done! Please add the following line to your dotenv file: \n\n"
                + dotenv
                + "\n"
            )

Example: importer.py (MIT License, Author: meine-stadt-transparent)
    def fetch_list_initial(self, url: str) -> None:
        """ Saves a complete external list as flattened json to the database """
        logger.info("Fetching List {}".format(url))

        timestamp = timezone.now()
        next_url = url
        all_objects = set()
        while next_url:
            logger.info("Fetching {}".format(next_url))
            response = self.loader.load(next_url)

            objects = set()

            for element in response["data"]:
                externalized = externalize(element)
                for i in externalized:
                    if not i.data.get("deleted") and i not in all_objects:
                        objects.update(externalized)

            next_url = response["links"].get("next")

            # We can't have that block outside the loop due to mysql's max_allowed_packet, manifesting as
            # "MySQL server has gone away" https://stackoverflow.com/a/36637118/3549270
            # We'll be able to solve this a lot better after the django 2.2 update with ignore_conflicts
            try:
                # Also avoid "MySQL server has gone away" errors due to timeouts
                # https://stackoverflow.com/a/32720475/3549270
                db.close_old_connections()
                # The tests are run with sqlite, which failed here with a TransactionManagementError:
                # "An error occurred in the current transaction. You can't execute queries until the end of the 'atomic' block."
                # That's why we build our own atomic block
                if settings.TESTING:
                    with transaction.atomic():
                        saved_objects = CachedObject.objects.bulk_create(objects)
                else:
                    saved_objects = CachedObject.objects.bulk_create(objects)
            except IntegrityError:
                saved_objects = set()
                for i in objects:
                    defaults = {
                        "data": i.data,
                        "to_import": True,
                        "oparl_type": i.oparl_type,
                    }
                    saved_objects.add(
                        CachedObject.objects.update_or_create(
                            url=i.url, defaults=defaults
                        )[0]
                    )

            all_objects.update(saved_objects)
        logger.info("Found {} objects in {}".format(len(all_objects), url))
        ExternalList(url=url, last_update=timestamp).save()

Example: document_parsing.py (MIT License, Author: meine-stadt-transparent)
def extract_locations(
    text: str, fallback_city: Optional[str], pipeline: Optional[AddressPipeline] = None
) -> List[Location]:
    if not text:
        return []

    if not fallback_city:
        fallback_city = Body.objects.get(id=settings.SITE_DEFAULT_BODY).short_name

    if not pipeline:
        pipeline = AddressPipeline(create_geoextract_data())

    if len(text) < settings.TEXT_CHUNK_SIZE:
        found_locations = pipeline.extract(text)
    else:
        # Workaround for https://github.com/stadt-karlsruhe/geoextract/issues/7
        found_locations = []
        for i in range(0, len(text), settings.TEXT_CHUNK_SIZE):
            # We can't use set because the dicts in the returned list are unhashable
            for location in pipeline.extract(text[i : i + settings.TEXT_CHUNK_SIZE]):
                if location not in found_locations:
                    found_locations.append(location)

    locations = []
    for found_location in found_locations:
        if "name" in found_location and len(found_location["name"]) < 5:
            continue

        location_name = format_location_name(found_location)

        search_str = get_search_string(found_location, fallback_city)
        defaults = {
            "description": location_name,
            "is_official": False,
            # This cutoff comes from a limitation of InnoDB
            "search_str": search_str[:767],
        }
        # Avoid "MySQL server has gone away" errors due to timeouts
        # https://stackoverflow.com/a/32720475/3549270
        db.close_old_connections()
        location, created = Location.objects_with_deleted.get_or_create(
            search_str=search_str, defaults=defaults
        )

        if created:
            location.geometry = geocode(search_str)
            location.save()

        locations.append(location)

    return locations

Example: django_db_middleware.py (Apache License 2.0, Author: mercadona)
    def pre_process_message(self, *args):
        db.close_old_connections()

Example: django_db_middleware.py (Apache License 2.0, Author: mercadona)
    def post_process_message(self):
        db.close_old_connections()
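
These two hooks mirror what Django itself does for HTTP requests: django/db/__init__.py connects close_old_connections to the request lifecycle signals, which is why ordinary views never need to call it by hand. Paraphrased from the Django source:

from django.core import signals
from django.db import close_old_connections

signals.request_started.connect(close_old_connections)
signals.request_finished.connect(close_old_connections)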

Example: signals.py (BSD 2-Clause "Simplified" License, Author: NicolasLM)
@signals.job_started.connect_via(spin.namespace)
def job_started(*args, job=None, **kwargs):
    reset_queries()
    close_old_connections()

Example: signals.py (BSD 2-Clause "Simplified" License, Author: NicolasLM)
@signals.job_finished.connect_via(spin.namespace)
def job_finished(*args, job=None, **kwargs):
    close_old_connections()