django.db.models.Min - Python examples

Here are examples of the Python API django.db.models.Min taken from open source projects.

89 Examples
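
Quick orientation before the examples: Min can be passed to aggregate() to reduce a whole queryset to a single value, or to annotate() to compute a per-group minimum. A minimal sketch, assuming a hypothetical Book model with a price field and a publisher foreign key:

from django.db.models import Min

# Whole-table minimum: a single query returning {'price__min': ...}
cheapest = Book.objects.aggregate(Min('price'))['price__min']

# Per-group minimum: one row per publisher, annotated with the
# lowest book price for that publisher
per_publisher = Book.objects.values('publisher').annotate(min_price=Min('price'))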

Example: models.py
Copyright GNU General Public License v3.0
Author : thinkingmachines
    def match_dataset(self, export=False):
        result = match_dataset.apply_async((self.id,), expires=360)
        result.get()

        if export:
            self.items.filter(
                id__in=(
                    i['id'] for i in
                    self.items.values('dataset_index').annotate(id=Min('id'))
                )).update(chosen=True)
            self.export()
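
The example above keeps one item per dataset_index by selecting the row with the smallest id in each group. The same deduplication pattern in isolation, as a sketch assuming a hypothetical Item model with a group field:

from django.db.models import Min

# One representative id per group: the smallest id in each group
keep_ids = (Item.objects.values('group')
            .annotate(keep_id=Min('id'))
            .values_list('keep_id', flat=True))

# Every row whose id is not a group minimum is a duplicate
duplicates = Item.objects.exclude(id__in=keep_ids)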

Example: tests.py
Copyright GNU Affero General Public License v3.0
Author : nesdis
    def test_aggregate_multi_join(self):
        vals = Store.objects.aggregate(Max("books__authors__age"))
        self.assertEqual(vals, {'books__authors__age__max': 57})

        vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
        self.assertEqual(vals, {'book__publisher__num_awards__min': 1})

Example: recommendation_engine.py
Copyright GNU Lesser General Public License v3.0
Author : paulvidal
def get_recommendations(highlight_model, user_id):
    """
    Always return 2 recommendations
    """
    highlights_id = []

    if user_id != 0:
        highlights_id = highlight_stat_manager.get_highlight_stats_id_for_user(user_id)

    # most viewed videos in the past week
    return LatestHighlight.objects \
        .filter(
            Q(time_since_added__gt=datetime.today() - timedelta(hours=168)) &
            ~Q(id=highlight_model.id) &
            ~Q(id__in=highlights_id)
        ) \
        .values('id', 'match_time', 'team1', 'score1', 'team2', 'score2', 'category') \
        .annotate(img_link=Min('img_link'), view_count=Sum('click_count')) \
        .order_by('-view_count', '-match_time')[:2]

Example: serializers.py
Copyright BSD 3-Clause "New" or "Revised" License
Author : mitodl
    @classmethod
    def get_program_price_range(cls, program):
        """
        Returns the financial aid possible cost range
        """
        course_max_price = program.price
        # get all the possible discounts for the program
        program_tiers_qset = TierProgram.objects.filter(
            Q(program=program) & Q(current=True)).order_by('discount_amount')
        if not program_tiers_qset.exists():
            log.error('The program "%s" needs at least one tier configured', program.title)
            raise ImproperlyConfigured(
                'The program "{}" needs at least one tier configured'.format(program.title))
        min_discount = program_tiers_qset.aggregate(
            Min('discount_amount')).get('discount_amount__min', 0)
        max_discount = program_tiers_qset.aggregate(
            Max('discount_amount')).get('discount_amount__max', 0)
        return course_max_price - max_discount, course_max_price - min_discount
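
Note that the two aggregate() calls above each issue a separate query; a single aggregate() call can compute both bounds at once. A sketch under the same assumptions as the snippet:

bounds = program_tiers_qset.aggregate(Min('discount_amount'), Max('discount_amount'))
min_discount = bounds['discount_amount__min']
max_discount = bounds['discount_amount__max']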

Example: test_regress.py
Copyright GNU Affero General Public License v3.0
Author : nesdis
    def test_unicode_date(self):
        "Testing dates are converted properly, even on SpatiaLite. See #16408."
        founded = datetime(1857, 5, 23)
        PennsylvaniaCity.objects.create(name='Mansfield', county='Tioga', point='POINT(-77.071445 41.823881)',
                                        founded=founded)
        self.assertEqual(founded, PennsylvaniaCity.objects.datetimes('founded', 'day')[0])
        self.assertEqual(founded, PennsylvaniaCity.objects.aggregate(Min('founded'))['founded__min'])

Example: abstract_models.py
Copyright MIT License
Author : labd
    @classmethod
    def get_root_nodes(cls):
        content_type = ContentType.objects.get_for_model(cls)
        depth = (
            cls.objects
            .filter(content_type=content_type)
            .aggregate(depth=models.Min('depth')))['depth']

        if depth is not None:
            return cls.objects.filter(content_type=content_type, depth=depth)
        return cls.objects.filter(content_type=content_type)

Example: serializers.py
Copyright MIT License
Author : pennlabs
    def get_members(self, instance):
        members = (
            Member.objects.filter(team__id=instance.id, alumnus=False)
            .annotate(order=Min("roles__order"))
            .order_by("order")
        )
        return MemberSerializer(members, many=True).data

Example: test_expressions.py
Copyright GNU Affero General Public License v3.0
Author : nesdis
    def test_multiple_annotation(self):
        multi_field = MultiFields.objects.create(
            point=Point(1, 1),
            city=City.objects.get(name='Houston'),
            poly=Polygon(((1, 1), (1, 2), (2, 2), (2, 1), (1, 1))),
        )
        qs = City.objects.values('name').annotate(
            distance=Min(functions.Distance('multifields__point', multi_field.city.point)),
        ).annotate(count=Count('multifields'))
        self.assertTrue(qs.first())

Example: models.py
Copyright GNU General Public License v3.0
Author : mwillsey
    @classmethod
    def winning_times(cls, qs=None):
        if qs is None:
            qs = cls.all_times().filter(seconds__gt=0)
        values = qs.values_list('date').annotate(
            winning_time=models.Min('seconds')
        )

        return {date: winning_time for date, winning_time in values}

Example: admin.py
Copyright MIT License
Author : ic-labs
    def get_queryset(self, request):
        return super(EventAdmin, self).get_queryset(request).annotate(
            last_occurrence=Max('occurrences__start'),
            first_occurrence=Min('occurrences__start'),
            occurrence_count=Count('occurrences')
        )

Example: serializers.py
Copyright GNU Affero General Public License v3.0
Author : eJourn-al
    def _get_teacher_deadline(self, assignment):
        return assignment.journal_set \
            .filter(
                Q(node__entry__grade__grade__isnull=True) | Q(node__entry__grade__published=False),
                node__entry__isnull=False) \
            .values('node__entry__last_edited') \
            .aggregate(Min('node__entry__last_edited'))['node__entry__last_edited__min']

Example: run_queries.py
Copyright MIT License
Author : paul-wolf
@timeit
def q_books_avg_min_max_queryset(**kwargs):

    l = []
    qs = Book.objects.aggregate(Avg('price'), Max('price'), Min('price'))
    for rec in qs:
        l.append(rec)

Example: base.py
Copyright Apache License 2.0
Author : c3nav
    @classmethod
    def max_bounds(cls):
        cache_key = 'mapdata:max_bounds:%s:%s' % (cls.__name__, MapUpdate.current_cache_key())
        result = cache.get(cache_key, None)
        if result is not None:
            return result
        result = cls.objects.all().aggregate(models.Min('left'), models.Min('bottom'),
                                             models.Max('right'), models.Max('top'))
        result = ((float(result['left__min'] or 0), float(result['bottom__min'] or 0)),
                  (float(result['right__max'] or 10), float(result['top__max'] or 10)))
        cache.set(cache_key, result, 900)
        return result

Example: managers.py
Copyright MIT License
Author : philgyford
    def get_queryset(self):
        from .models import Publication
        return super().get_queryset()\
                .filter(reading__start_date__isnull=False,
                        reading__end_date__isnull=True)\
                .annotate(min_start_date=Min('reading__start_date'))\
                .order_by('min_start_date')

Example: models.py
Copyright MIT License
Author : LUKKIEN
    @classmethod
    def get_root_nodes(cls):
        """
        :returns: A queryset containing the root nodes in the tree. This
        differs from the default implementation to find category page root
        nodes by `content_type`.
        """
        content_type = ContentType.objects.get_for_model(cls)
        depth = (cls.objects.filter(content_type=content_type).aggregate(
            depth=models.Min('depth')))['depth']

        if depth is not None:
            return cls.objects.filter(content_type=content_type, depth=depth)

        return cls.objects.filter(content_type=content_type)

Example: models.py
Copyright MIT License
Author : learningequality
    def dedupe_by_content_id(self):
        # remove duplicate content nodes based on content_id
        if connection.vendor == "sqlite":
            # adapted from https://code.djangoproject.com/ticket/22696
            deduped_ids = (
                self.values("content_id")
                .annotate(node_id=Min("id"))
                .values_list("node_id", flat=True)
            )
            return self.filter_by_uuids(deduped_ids)

        # when using postgres, we can call distinct on a specific column
        elif connection.vendor == "postgresql":
            return self.order_by("content_id").distinct("content_id")

Example: publicity.py
Copyright GNU General Public License v3.0
Author : chaoss
    def get_period(self):
        dates = self.posts.aggregate(first=Min('timestamp'),
                                     last=Max('timestamp'))

        return ((dates['last'] - dates['first']).days /
                30.0)

Example: test_regress.py
Copyright Apache License 2.0
Author : edisonlz
    def test_unicode_date(self):
        "Testing dates are converted properly, even on SpatiaLite. See #16408."
        founded = datetime(1857, 5, 23)
        mansfield = PennsylvaniaCity.objects.create(name='Mansfield', county='Tioga', point='POINT(-77.071445 41.823881)',
                                                    founded=founded)
        self.assertEqual(founded, PennsylvaniaCity.objects.datetimes('founded', 'day')[0])
        self.assertEqual(founded, PennsylvaniaCity.objects.aggregate(Min('founded'))['founded__min'])

Example: latest_highlight_manager.py
Copyright GNU Lesser General Public License v3.0
Author : paulvidal
def get_recent_unique_highlights(count=10, search=None):
    highlights = LatestHighlight.objects \
        .filter(
            Q(priority_short__gt=0)
            | Q(time_since_added__lt=datetime.today() - timedelta(minutes=MIN_MINUTES_TO_SEND_HIGHLIGHTS))
        ) \
        .values('id', 'match_time', 'team1', 'score1', 'team2', 'score2', 'category') \
        .annotate(img_link=Min('img_link'), view_count=Sum('click_count')) \
        .order_by('-match_time', '-view_count', 'team1')

    if search:
        highlights = highlights.filter(
            Q(team1=search)
            | Q(team2=search)
            | Q(category=search)
        )

    return list(highlights[:count])

Example: invalidate.py
Copyright MIT License
Author : g0v
    def handle(self, *args, **options):
        from_date = options['from_date']
        to_date = options['to_date']

        static_qs = HouseTS.objects.filter(
            created__gte=from_date,
            created__lte=to_date,
            **self.could_be_house
        ).values(
            'vendor_house_id',
            *self.should_be_static_fields
        ).annotate(
            count=Count('id'),
        ).order_by(
            'vendor_house_id'
        )

        static_houses = {}
        total_houses = 0
        total_invalid_houses = 0
        for house in static_qs:
            house_id = house['vendor_house_id']
            # print('  {} x {} - {}'.format(house_id, house['count'], house['building_type']))
            if house['vendor_house_id'] in static_houses:
                static_houses[house_id].append(house['count'])
                total_invalid_houses += 1
            else:
                static_houses[house_id] = [house['count']]
                total_houses += 1

        for house_id in static_houses:
            if len(static_houses[house_id]) > 1:
                print('[STATIC] House {} changed {} ({}) times!!'.format(house_id, len(static_houses[house_id]), static_houses[house_id]))

        print('[STATIC] Invalid houses: {}/{}'.format(total_invalid_houses, total_houses))

        # for a valid house, min should be at least max/2
        annotates = {}

        for field in self.should_be_small_diff_fields:
            annotates['max_{}'.format(field)] = Max(field)
            annotates['min_{}'.format(field)] = Min(field)

        small_diff_qs = HouseTS.objects.filter(
            created__gte=from_date,
            created__lte=to_date,
            **self.could_be_house
        ).values(
            'vendor_house_id',
        ).annotate(
            count=Count('id'),
            **annotates,
        ).order_by(
            'vendor_house_id'
        )

        total_houses = 0
        total_invalid_houses = 0
        for house in small_diff_qs:
            is_invalid = False
            total_houses += 1

            for field in self.should_be_small_diff_fields:
                max_value = house['max_{}'.format(field)]
                min_value = house['min_{}'.format(field)]
                if max_value is not None and min_value is not None:
                    if max_value / 2 > min_value and min_value >= 0:
                        is_invalid = True
                        print('[SMALL] House {} field {} change too much, from {} to {}'.format(
                            house['vendor_house_id'], field, min_value, max_value
                        ))

            if is_invalid:
                total_invalid_houses += 1

        print('[SMALL] Invalid houses: {}/{}'.format(total_invalid_houses, total_houses))

Example: transaction_scan_service.py
Copyright MIT License
Author : gnosis
    def get_block_numbers_for_search(self, safe_addresses: List[str]) -> Optional[Tuple[int, int]]:
        """
        :param safe_addresses:
        :return: Minimum common `from_block_number` and `to_block_number` for search of relevant `tx hashes`
        """
        block_process_limit = self.block_process_limit
        confirmations = self.confirmations
        current_block_number = self.ethereum_client.current_block_number

        safe_tx_status_queryset = SafeTxStatus.objects.filter(safe_id__in=safe_addresses)
        common_minimum_block_number = safe_tx_status_queryset.aggregate(**{
            self.database_field: Min(self.database_field)
        })[self.database_field]
        if common_minimum_block_number is None:  # Empty queryset
            return

        from_block_number = common_minimum_block_number + 1
        if (current_block_number - common_minimum_block_number) < confirmations:
            return  # We don't want problems with reorgs

        if block_process_limit:
            to_block_number = min(common_minimum_block_number + block_process_limit,
                                  current_block_number - confirmations)
        else:
            to_block_number = current_block_number - confirmations

        return from_block_number, to_block_number

Example: utils.py
Copyright MIT License
Author : learningequality
def extract_facility_statistics(facility):

    dataset_id = facility.dataset_id

    settings = {
        name: getattr(facility.dataset, name)
        for name in facility_settings
        if hasattr(facility.dataset, name)
    }

    settings.update(allow_guest_access=allow_guest_access())

    learners = FacilityUser.objects.filter(dataset_id=dataset_id).exclude(
        roles__kind__in=[role_kinds.ADMIN, role_kinds.COACH]
    )
    coaches = FacilityUser.objects.filter(
        dataset_id=dataset_id, roles__kind__in=[role_kinds.ADMIN, role_kinds.COACH]
    )

    usersessions = UserSessionLog.objects.filter(dataset_id=dataset_id)
    contsessions = ContentSessionLog.objects.filter(
        dataset_id=dataset_id, time_spent__lt=3600 * 2
    )

    # the aggregates below are used to calculate the first and most recent times this device was used
    usersess_agg = usersessions.filter(
        start_timestamp__gt=datetime.datetime(2016, 1, 1)
    ).aggregate(first=Min("start_timestamp"), last=Max("last_interaction_timestamp"))
    contsess_agg = contsessions.filter(
        start_timestamp__gt=datetime.datetime(2016, 1, 1)
    ).aggregate(first=Min("start_timestamp"), last=Max("end_timestamp"))

    # extract the first and last times we've seen logs, ignoring any that are None
    first_times = [d["first"] for d in [usersess_agg, contsess_agg] if d["first"]]
    last_times = [d["last"] for d in [usersess_agg, contsess_agg] if d["last"]]

    # since newly provisioned devices won't have logs, we don't know whether we have an available datetime object
    first_interaction_timestamp = (
        getattr(min(first_times), "strftime", None) if first_times else None
    )
    last_interaction_timestamp = (
        getattr(max(last_times), "strftime", None) if last_times else None
    )

    sesslogs_by_kind = (
        contsessions.order_by("kind").values("kind").annotate(count=Count("kind"))
    )
    sesslogs_by_kind = {log["kind"]: log["count"] for log in sesslogs_by_kind}

    summarylogs = ContentSummaryLog.objects.filter(dataset_id=dataset_id)

    contsessions_user = contsessions.exclude(user=None)
    contsessions_anon = contsessions.filter(user=None)

    # calculate learner stats
    learner_stats = calculate_demographic_stats(dataset_id=dataset_id, learners=True)

    # calculate non-learner stats
    non_learner_stats = calculate_demographic_stats(
        dataset_id=dataset_id, learners=False
    )

    # fmt: off
    return {
        # facility_id
        "fi": base64.encodestring(hashlib.md5(facility.id.encode()).digest())[:10].decode(),
        # settings
        "s": settings,
        # learners_count
        "lc": learners.count(),
        # learner_login_count
        "llc": usersessions.exclude(user__roles__kind__in=[role_kinds.ADMIN, role_kinds.COACH]).distinct().count(),
        # coaches_count
        "cc": coaches.count(),
        # coach_login_count
        "clc": usersessions.filter(user__roles__kind__in=[role_kinds.ADMIN, role_kinds.COACH]).distinct().count(),
        # first
        "f" : first_interaction_timestamp("%Y-%m-%d") if first_interaction_timestamp else None,
        # last
        "l": last_interaction_timestamp("%Y-%m-%d") if last_interaction_timestamp else None,
        # summ_started
        "ss": summarylogs.count(),
        # summ_complete
        "sc": summarylogs.exclude(completion_timestamp=None).count(),
        # sess_kinds
        "sk": sesslogs_by_kind,
        # lesson_count
        "lec": Lesson.objects.filter(dataset_id=dataset_id).count(),
        # exam_count
        "ec": Exam.objects.filter(dataset_id=dataset_id).count(),
        # exam_log_count
        "elc": ExamLog.objects.filter(dataset_id=dataset_id).count(),
        # att_log_count
        "alc": AttemptLog.objects.filter(dataset_id=dataset_id).count(),
        # exam_att_log_count
        "ealc": ExamAttemptLog.objects.filter(dataset_id=dataset_id).count(),
        # sess_user_count
        "suc": contsessions_user.count(),
        # sess_anon_count
        "sac": contsessions_anon.count(),
        # sess_user_time
        "sut": int((contsessions_user.aggregate(total_time=Sum("time_spent"))["total_time"] or 0) / 60),
        # sess_anon_time
        "sat": int((contsessions_anon.aggregate(total_time=Sum("time_spent"))["total_time"] or 0) / 60),
        # demographic_stats_learner
        "dsl": learner_stats,
        # demographic_stats_non_learner
        "dsnl": non_learner_stats,
    }

Example: v1.py
Copyright GNU Affero General Public License v3.0
Author : LexPredict
    def get(self, request, *args, **kwargs):
        per_month = json.loads(self.request.GET.get('per_month', 'false'))

        qs = self.get_queryset()

        if per_month:
            qs = qs.order_by('date') \
                .annotate(start=TruncMonth('date')) \
                .values('start') \
                .annotate(count=Sum('count')).order_by()
            data = list(qs)
            visible_interval = 360
        else:
            qs = qs.order_by('date') \
                .values(start=F('date')) \
                .annotate(count=Sum('count'))
            data = list(qs)
            date_list_view = DateUsageListAPIView(request=self.request, format_kwarg={})
            date_data = date_list_view.list(request=self.request).data
            visible_interval = 180

        max_value = qs.aggregate(m=Max('count'))['m']
        min_value = qs.aggregate(m=Min('count'))['m']
        range_value = max_value - min_value

        for item in data:
            item['weight'] = (item['count'] - min_value) / range_value
            if per_month:
                item['url'] = '{}?month_search={}'.format(
                    reverse('extract:date-usage-list'), item['start'].isoformat())
                item['content'] = '{}, {}: {}'.format(item['start'].strftime('%B'),
                                                      item['start'].year, item['count'])
            else:
                item['url'] = '{}?date_search={}'.format(
                    reverse('extract:date-usage-list'), item['start'].isoformat())
                item['content'] = '{}: {}'.format(item['start'].isoformat(), item['count'])
                item['date_data'] = [i for i in date_data if i['date'] == item['start']]

        initial_start_date = datetime.date.today() - datetime.timedelta(days=visible_interval)
        initial_end_date = datetime.date.today() + datetime.timedelta(days=visible_interval)
        ret = {'data': data,
               'per_month': per_month,
               'initial_start_date': initial_start_date,
               'initial_end_date': initial_end_date}
        return JsonResponse(ret)

Example: v1.py
Copyright GNU Affero General Public License v3.0
Author : LexPredict
    def get(self, request, *args, **kwargs):
        qs = self.get_queryset()
        data = list(qs)

        max_value = qs.aggregate(m=Max('count'))['m']

        min_date = qs.aggregate(m=Min('date'))['m']
        max_date = qs.aggregate(m=Max('date'))['m']

        for item in data:
            item['weight'] = item['count'] / max_value
            # TODO: update url
            item['url'] = '{}?date_search={}'.format(
                reverse('extract:date-usage-list'), item['date'].isoformat())

        ret = {'data': data,
               'min_year': min_date.year,
               'max_year': max_date.year,
               'context': self.get_context()}

        return JsonResponse(ret)

Example: tasks.py
Copyright GNU Affero General Public License v3.0
Author : LexPredict
    @staticmethod
    @shared_task(base=ExtendedTask,
                 bind=True,
                 name=task_names.TASK_NAME_CHECK_EMAIL_POOL,
                 soft_time_limit=60,
                 max_retries=3,
                 autoretry_for=(SoftTimeLimitExceeded, InterfaceError, OperationalError,),
                 default_retry_delay=10)
    def check_email_pool(_task) -> None:
        log = CeleryTaskLogger(_task)
        for event in EmailNotificationPool.DOC_NOTIFICATION_EVENTS:
            cache_key = f'{CACHE_DOC_NOTIFICATION_PREFIX}{event}'
            try:
                cached_msgs_count = ObjectStorage.objects.filter(pk__startswith=cache_key).count()
                if not cached_msgs_count:
                    continue
                if cached_msgs_count < EmailNotificationPool.batch_size:
                    latest_msg_time = ObjectStorage.objects.filter(
                        pk__startswith=cache_key).aggregate(Min('last_updated'))
                    latest_msg_time = [latest_msg_time[k] for k in latest_msg_time][0]
                    delta = now() - latest_msg_time
                    if delta.seconds < EmailNotificationPool.batch_seconds:
                        continue

                ntfs = []  # type: List[DocumentNotification]
                for raw_msg in ObjectStorage.objects.filter(pk__startswith=cache_key):  # type: ObjectStorage
                    try:
                        msg = pickle.loads(raw_msg.data)  # type: DocumentNotification
                        ntfs.append(msg)
                    except:
                        log.error(f'send_notifications_packet() - error unpickling raw_msg.data')
                        pass

                if not ntfs:
                    continue

Example: admin.py
Copyright MIT License
Author : lukasvinclav
    def choices(self, changelist):
        total = self.q.all().count()

        min_value = self.q.all().aggregate(
            min=Min(self.parameter_name)
        ).get('min', 0)

        if total > 1:
            max_value = self.q.all().aggregate(
                max=Max(self.parameter_name)
            ).get('max', 0)
        else:
            max_value = None

        if isinstance(self.field, (FloatField, DecimalField)):
            decimals = self.MAX_DECIMALS
            step = self.STEP if self.STEP else self._get_min_step(self.MAX_DECIMALS)
        else:
            decimals = 0
            step = self.STEP if self.STEP else 1

        return ({
            'decimals': decimals,
            'step': step,
            'parameter_name': self.parameter_name,
            'request': self.request,
            'min': min_value,
            'max': max_value,
            'value_from': self.used_parameters.get(self.parameter_name + '_from', min_value),
            'value_to': self.used_parameters.get(self.parameter_name + '_to', max_value),
            'form': SliderNumericForm(name=self.parameter_name, data={
                self.parameter_name + '_from': self.used_parameters.get(self.parameter_name + '_from', min_value),
                self.parameter_name + '_to': self.used_parameters.get(self.parameter_name + '_to', max_value),
            })
        }, )

Example: views.py
Copyright MIT License
Author : diegojromerolopez
@login_required
def view_calendar(request):
    member = None
    if user_is_member(request.user):
        member = request.user.member

    boards = get_user_boards(request.user)
    members = Member.objects.filter(boards__in=boards).distinct().filter(is_developer=True).order_by("id")

    min_date = DailyMemberMood.objects.filter(member__in=members).aggregate(min_date=Min("date"))["min_date"]
    max_date = DailyMemberMood.objects.filter(member__in=members).aggregate(max_date=Max("date"))["max_date"]

    dates = []
    if min_date and max_date:
        date_i = copy.deepcopy(min_date)
        while date_i <= max_date:
            # Only add date when there are mood measurements
            if DailyMemberMood.objects.filter(date=date_i, member__in=members).exists():
                dates.append(date_i)
            date_i += timedelta(days=1)

    replacements = {"member": member, "members": members, "dates": dates}
    return render(request, "niko_niko_calendar/calendar.html", replacements)

Example: feature.py
Copyright GNU General Public License v3.0
Author : CalthorpeAnalytics
    @property
    def minimum_value(self):
        return self.feature_class.objects.all().aggregate(Min(self.attribute))

Example: views.py
Copyright Mozilla Public License 2.0
Author : mozilla
def measure(request):
    '''
    Gets data specific to a channel/platform/measure combination
    '''
    channel_name = request.GET.get('channel')
    platform_name = request.GET.get('platform')
    measure_name = request.GET.get('measure')
    interval = request.GET.get('interval')
    start = request.GET.get('start')
    relative = request.GET.get('relative')
    versions = request.GET.getlist('version')

    if not all([channel_name, platform_name, measure_name, interval]):
        return HttpResponseBadRequest("All of channel, platform, measure, interval required")
    if not all([val is None or val.isdigit() for val in (start, interval)]):
        return HttpResponseBadRequest(
            "Interval / start time must be specified in seconds (as an integer)")

    builds = Build.objects.filter(channel__name=channel_name,
                                  platform__name=platform_name)
    if versions:
        builds = builds.filter(version__in=versions)

    try:
        measure = Measure.objects.get(name=measure_name,
                                      platform__name=platform_name)
    except Measure.DoesNotExist:
        return HttpResponseNotFound("Measure not available")

    datums = Datum.objects.filter(build__in=builds, measure=measure)

    ret = {}

    if relative is None or (relative.isdigit() and not int(relative)):
        # default is to get latest data for all series
        datums = _filter_datums_to_time_interval(datums, start, interval)

        for (build_id, version, timestamp, value, usage_hours) in datums.values_list(
                'build__build_id', 'build__version',
                'timestamp', 'value', 'usage_hours').order_by('timestamp'):
            if not ret.get(build_id):
                ret[build_id] = {
                    'data': [],
                    'version': version
                }
            ret[build_id]['data'].append((timestamp, value, usage_hours))
    else:
        if not versions:
            # if the user does not specify a list of versions, generate our
            # own based on the latest version with data
            latest_build_id = datums.filter(
                timestamp__gt=(datetime.datetime.now() -
                               datetime.timedelta(days=1))
            ).aggregate(
                Max('build__build_id'))['build__build_id__max']
            if int(interval) == 0:
                # if interval is 0 for relative, just use the interval of the latest
                # released version
                timestamps_for_latest = datums.filter(
                    build__build_id=latest_build_id).aggregate(
                        Min('timestamp'), Max('timestamp'))
                interval = (timestamps_for_latest['timestamp__max'] -
                            timestamps_for_latest['timestamp__min']).total_seconds()
            # get data for current + up to three previous versions (handling each
            # build id for each version, if there are multiple)
            versions = _sorted_version_list(
                [str(d[0]) for d in datums.values_list('build__version').distinct()]
            )[:4]
        version_timestamps = {
            (d[0], d[1]): d[2] for d in datums.filter(
                build__version__in=versions).values_list(
                    'build__version', 'build__build_id').distinct().annotate(
                        Min('timestamp'))
        }

        # for each version/buildid combo, grab their data relative to the
        # latest version
        for (version_tuple, base_timestamp) in version_timestamps.items():
            (version, build_id) = version_tuple
            ret[build_id] = {
                'version': version,
                'data': []
            }
            if start:
                start_timestamp = base_timestamp + datetime.timedelta(seconds=int(start))
            else:
                start_timestamp = base_timestamp
            ret[build_id]['data'] = [
                [int((timestamp - base_timestamp).total_seconds()), value, usage_hours] for
                (timestamp, value, usage_hours) in datums.filter(
                    build__version=version,
                    build__build_id=build_id,
                    timestamp__range=(start_timestamp,
                                      start_timestamp + datetime.timedelta(seconds=int(interval)))
                ).order_by('timestamp').values_list('timestamp', 'value', 'usage_hours')]

    return JsonResponse(data={'measure_data': ret})

Example: tests.py
Copyright GNU Affero General Public License v3.0
Author : nesdis
    def test_even_more_aggregate(self):
        publishers = Publisher.objects.annotate(
            earliest_book=Min("book__pubdate"),
        ).exclude(earliest_book=None).order_by("earliest_book").values(
            'earliest_book',
            'num_awards',
            'id',
            'name',
        )
        self.assertEqual(
            list(publishers), [
                {
                    'earliest_book': datetime.date(1991, 10, 15),
                    'num_awards': 9,
                    'id': self.p4.id,
                    'name': 'Morgan Kaufmann'
                },
                {
                    'earliest_book': datetime.date(1995, 1, 15),
                    'num_awards': 7,
                    'id': self.p3.id,
                    'name': 'Prentice Hall'
                },
                {
                    'earliest_book': datetime.date(2007, 12, 6),
                    'num_awards': 3,
                    'id': self.p1.id,
                    'name': 'Apress'
                },
                {
                    'earliest_book': datetime.date(2008, 3, 3),
                    'num_awards': 1,
                    'id': self.p2.id,
                    'name': 'Sams'
                }
            ]
        )

        vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening"))
        self.assertEqual(
            vals,
            {
                "friday_night_closing__max": datetime.time(23, 59, 59),
                "original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
            }
        )

Example: binning_strategy.py
Copyright GNU General Public License v3.0
Author : nirizr
  def get_bins(self, matcher):
    del matcher

    source_sizes = (self.vector_cls.objects.filter(self.get_source_filter())
                                           .aggregate(Min('instance__size'),
                                                      Max('instance__size')))
    target_sizes = (self.vector_cls.objects.filter(self.get_target_filter())
                                           .aggregate(Min('instance__size'),
                                                      Max('instance__size')))

    # find the common denominator of sizes;
    # the 'or 0' parts are a trick to replace Nones with 0
    min_size = max(source_sizes['instance__size__min'] or 0,
                   target_sizes['instance__size__min'] or 0)
    max_size = min(source_sizes['instance__size__max'] or 0,
                   target_sizes['instance__size__max'] or 0)

    # Get the highest exponent of BASE that is below the minimal actual matched
    # object's size. This is essentially the lower bound bin size. Make sure
    # that size is at least MINIMAL_BIN_SIZE, because we want all really small
    # matched objects to be binned together (sizes 4 & 6 should still be
    # matched to each other; the difference is too small to mean anything)
    min_size = max(min_size, self.MINIMAL_BIN_SIZE)
    min_bin_base_power = int(ceil(log(min_size, self.BIN_BASE)))

    # Get the lowest exponent of BASE that is above the maximal actual matched
    # object's size. This is essentially the upper bound bin size.
    max_size = max(max_size, 1)
    max_bin_base_power = 1 + int(ceil(log(max_size, self.BIN_BASE)))

    # build binning boundaries based on powers of BIN_BASE
    boundaries = [self.BIN_BASE ** i for i in range(min_bin_base_power,
                                                    max_bin_base_power)]

    # if min_size is below the first boundary, which will happen if
    # min_bin_base_power got bumped up thanks to MINIMAL_BIN_SIZE
    if len(boundaries) > 0 and min_size < boundaries[0]:
      boundaries = [0] + boundaries

    bins = [(boundaries[i - 1], boundaries[i])
              for i in range(1, len(boundaries))]

    return bins

Example: managers.py
Copyright GNU General Public License v3.0
Author : chaoss
    def prefetch_dates(self):
        """
        Prefetch smart_start_date and last_activity for all projects in
        queryset.
        """
        # Handling non-root projects would increase the complexity
        # significantly, so don't bother for the time being.
        if any(proj.parent_id for proj in self):
            raise NotImplementedError("Cannot prefetch dates for non-root projects")

        # First bulk fetch dates
        models = (
            (vcsmodels.Commit, 'repository__project'),
            (mlmodels.Post, 'mailing_lists__projects'),
            (btmodels.Comment, 'bug__tracker_info__projects'),
            (blogmodels.Post, 'blog__projects')
        )

        trees = [proj.tree_id for proj in self]

        dates = {}
        for model, relation in models:
            key = relation + '__tree_id'
            qs = model.objects.filter(**{key + '__in': trees}) \
                              .values_list(key) \
                              .annotate(lastact=Max('timestamp'),
                                        firstact=Min('timestamp')) \
                              .exclude(lastact=None, firstact=None)
            for tree, lastact, firstact in qs:
                # only consider vcs commits to mark the beginning of a project
                if model is not vcsmodels.Commit:
                    firstact = None

                try:
                    item = dates[tree]
                    lastact = max_or_none(item.last_activity, lastact)
                    firstact = min_or_none(item.smart_start_date, firstact)

                except KeyError:
                    pass

                dates[tree] = DateData(firstact, lastact)

        # Store them in _cached_dates property
        for project in self:
            startact, lastact = dates.get(project.tree_id, (None, None))
            startact = project.start_date or startact

            # smart_start_date should be a date, so coax to that type
            if isinstance(startact, datetime.datetime):
                startact = startact.date()

            self.model.smart_start_date.preseed_cache(project, startact)
            self.model.last_activity.preseed_cache(project, lastact)

        return self

Example: bills.py
Copyright MIT License
Author : openstates
def get_bills_with_action_annotation():
    """
    return Bill queryset that is already annotated with:
        first_action_date
        latest_action_date
        latest_action_description

    and legislative_session & jurisdiction in the select_related already
    """
    latest_actions = (
        BillAction.objects.filter(bill=OuterRef("pk"))
        .order_by("date")
        .values("description")[:1]
    )
    return (
        Bill.objects.all()
        .annotate(first_action_date=Min("actions__date"))
        .annotate(latest_action_date=Max("actions__date"))
        .annotate(latest_action_description=Subquery(latest_actions))
        .select_related("legislative_session", "legislative_session__jurisdiction")
        .prefetch_related("actions")
    )

Example: user_data.py
Copyright MIT License
Author : learningequality
def add_channel_activity_for_user(**options):  # noqa: max-complexity=16
    n_content_items = options["n_content_items"]
    channel = options["channel"]
    user = options["user"]
    now = options["now"]

    channel_id = channel.id
    default_channel_content = ContentNode.objects.exclude(
        kind=content_kinds.TOPIC
    ).filter(channel_id=channel_id)

    logger.debug(
        "Generating {i} user interaction(s) for user: {user} for channel: {channel}".format(
            i=n_content_items, user=user, channel=channel.name
        )
    )
    # Generate a content interaction history for this many content items
    for i in range(0, n_content_items):
        # Use this to randomly select a content node to generate the interaction for
        index = random.randint(0, default_channel_content.count() - 1)
        random_node = default_channel_content[index]

        # We will generate between 1 and 5 content session logs for this content item
        session_logs = []

        for j in range(0, random.randint(1, 5)):
            # How many minutes did they spend in this session? Up to 15
            duration = random.random() * 15
            # assume they spent some of this session time not doing anything - the lazy...
            idle_time = random.random() * duration
            session_logs.append(
                ContentSessionLog(
                    user=user,
                    channel_id=channel_id,
                    content_id=random_node.content_id,
                    start_timestamp=now - datetime.timedelta(i + j, 0, duration),
                    end_timestamp=now - datetime.timedelta(i + j),
                    # How many seconds did they actually spend doing something?
                    time_spent=(duration - idle_time) * 60,
                    progress=random.random(),
                    kind=random_node.kind,
                )
            )

        # assume they have not completed
        completion_timestamp = None
        cumulative_progress = 0

        # Go through all the session logs and add up the progress in each
        for session_log in session_logs:
            cumulative_progress = min(cumulative_progress + session_log.progress, 1.0)
            # If the progress is 1 or more, they have completed! Set the completion timestamp
            # For the end of this session, for the sake of argument.
            if cumulative_progress >= 1.0:
                completion_timestamp = session_log.end_timestamp
            session_log.save()

        # Now that we have created all the Session Logs, infer the summary log from them
        summary_log, created = ContentSummaryLog.objects.get_or_create(
            user=user,
            kind=random_node.kind,
            content_id=random_node.content_id,
            # Use defaults here so that we don't try to create a new Summary Log with the same
            # kind/content_id/user combo, as this would violate uniqueness constraints
            defaults={
                "channel_id": channel_id,
                # Start timestamp is the earliest start timestamp of the session logs
                "start_timestamp": min(
                    session_logs, key=lambda x: x.start_timestamp
                ).start_timestamp,
                # End timestamp is the latest of all the end timestamps
                "end_timestamp": max(
                    session_logs, key=lambda x: x.end_timestamp
                ).end_timestamp,
                "completion_timestamp": completion_timestamp,
                "time_spent": sum(
                    session_log.time_spent for session_log in session_logs
                ),
                "progress": min(
                    sum(session_log.progress for session_log in session_logs), 1.0
                ),
            },
        )

        if not created:
            # We didn't create the summary log this time, so it probably means it has other session logs
            # Aggregate the information from there to update the relevant fields on the summary log
            updates = ContentSessionLog.objects.filter(
                user=user, kind=random_node.kind, content_id=random_node.content_id
            ).aggregate(
                start_timestamp=Min("start_timestamp"),
                end_timestamp=Max("end_timestamp"),
                progress=Sum("progress"),
            )

            summary_log.start_timestamp = updates["start_timestamp"]
            summary_log.end_timestamp = updates["end_timestamp"]
            if summary_log.progress < 1.0 and updates["progress"] >= 1.0:
                # If it was not previously completed, and is now, set the completion timestamp to the
                # final end timestamp of the session logs.
                summary_log.completion_timestamp = updates["end_timestamp"]
            summary_log.progress = min(1.0, updates["progress"])
            summary_log.save()

        # If we are dealing with anything but an assessment (currently only exercises)
        # we are done - if not, create additional data here
        if random_node.kind == content_kinds.EXERCISE:
            # Generate a mastery log if needed
            mastery_log, created = MasteryLog.objects.get_or_create(
                user=user,
                mastery_level=1,
                summarylog=summary_log,
                defaults={
                    "start_timestamp": summary_log.start_timestamp,
                    "end_timestamp": summary_log.end_timestamp,
                    "complete": summary_log.progress >= 1.0,
                    "completion_timestamp": completion_timestamp,
                    "mastery_criterion": {"m": 5, "n": 5, "type": "m_of_n"},
                },
            )

            if not created:
                # Not created, so update relevant fields on it based on new interactions
                if not mastery_log.complete and summary_log.progress >= 1.0:
                    mastery_log.complete = True
                    mastery_log.completion_timestamp = summary_log.completion_timestamp
                mastery_log.end_timestamp = summary_log.end_timestamp

            # Get the list of assessment item ids from the assessment metadata
            assessment_item_ids = (
                random_node.assessmentmetadata.first().assessment_item_ids
            )
            if not assessment_item_ids:
                continue
            for j, session_log in enumerate(reversed(session_logs)):
                # Always make students get 5 attempts correct in the most recent session
                # if the exercise is complete
                complete = j == 0 and mastery_log.complete
                if complete:
                    n = 5
                else:
                    # Otherwise, let them have answered between 1 and 5 questions per session
                    n = random.randint(1, 5)
                # How long did they spend on these n questions?
                timespan = session_log.end_timestamp - session_log.start_timestamp
                # Index through each individual question
                for k in range(0, n):
                    if complete:
                        # If this is the session where they completed the exercise, always
                        # make them get it right
                        correct = True
                    else:
                        # Otherwise only let students get odd indexed questions right,
                        # ensuring they will always have a mastery breaking sequence
                        # as zero based indexing means their first attempt will always be wrong!
                        correct = k % 2 == 1

                    start_timestamp = session_log.end_timestamp - (timespan / n) * (
                        k + 1
                    )

                    end_timestamp = session_log.end_timestamp - (timespan / n) * k

                    # If incorrect, must have made at least two attempts at the question
                    question_attempts = 1 if correct else random.randint(2, 5)

                    question_interval = (
                        end_timestamp - start_timestamp
                    ) / question_attempts

                    # If they got it wrong, give 20/80 chance that they took a hint to do so
                    hinted = random.choice((False, False, False, False, not correct))
                    if hinted:
                        first_interaction = {"correct": False, "type": "hint"}
                    else:
                        first_interaction = {"correct": correct, "type": "answer"}
                    first_interaction.update(
                        {"answer": {}, "timestamp": start_timestamp + question_interval}
                    )

                    interaction_history = [first_interaction]

                    # If it is correct, this can be our only response, otherwise, add more.
                    if not correct:
                        for att in range(1, question_attempts - 1):
                            # Add on additional attempts for intervening incorrect responses
                            interaction_history += [
                                {
                                    "correct": False,
                                    "type": "answer",
                                    "answer": {},
                                    "timestamp": start_timestamp
                                    + question_interval * (att + 1),
                                }
                            ]
                        # Finally, add a correct response that allows the user to move onto the next question
                        interaction_history += [
                            {
                                "correct": True,
                                "type": "answer",
                                "answer": {},
                                "timestamp": end_timestamp,
                            }
                        ]

                    AttemptLog.objects.create(
                        # Choose a random assessment item id from the exercise
                        item=random.choice(assessment_item_ids),
                        # Just let each attempt be a fixed proportion of the total time spent on the exercise
                        start_timestamp=start_timestamp,
                        end_timestamp=end_timestamp,
                        time_spent=timespan.total_seconds(),
                        # Mark all attempts as complete, as we assume the student gave the correct answer eventually
                        complete=True,
                        # Mark as correct or incorrect
                        correct=correct,
                        hinted=hinted,
                        # We can't meaningfully generate fake answer data for Perseus exercises
                        # (which are currently our only exercise type) - so don't bother.
                        answer={},
                        simple_answer="",
                        interaction_history=interaction_history,
                        user=user,
                        masterylog=mastery_log,
                        sessionlog=session_log,
                    )

Example: raw_db_repository.py
Copyright GNU Affero General Public License v3.0
Author : LexPredict
    def get_generic_values(self, doc: Document, generic_values_to_fill: FieldSpec = None) \
            -> Dict[str, Any]:
        # If changing keys of the returned dictionary - please change field code constants
        # in apps/rawdb/field_value_tables.py accordingly (FIELD_CODE_CLUSTER_ID and others)

        document_qs = Document.all_objects.filter(pk=doc.pk)

        annotations = dict()

        if DocumentGenericField.cluster_id.specified_in(generic_values_to_fill):
            annotations['cluster_id'] = Max('documentcluster')

        if generic_values_to_fill is True:
            annotations['parties'] = StringAgg('textunit__partyusage__party__name',
                                               delimiter=', ', distinct=True)
            annotations['min_date'] = Min('textunit__dateusage__date')
            annotations['max_date'] = Max('textunit__dateusage__date')

        # if the Document was suddenly removed by this time
        if not document_qs.exists():
            raise Document.DoesNotExist

        values = {}
        if annotations:
            values = document_qs.annotate(**annotations).values(*annotations.keys()).first()  # type: Dict[str, Any]

        if generic_values_to_fill is True:
            # max_currency = CurrencyUsage.objects.filter(text_unit__document_id=doc.pk) \
            #     .order_by('-amount').values('currency', 'amount').first()  # type: Dict[str, Any]

            # use raw sql as the query above sometimes hangs up to 1 minute
            max_currency_sql = '''
                SELECT c.currency, MAX(c.amount) amount
                FROM extract_currencyusage c
                INNER JOIN document_textunit dtu ON c.text_unit_id = dtu.id
                WHERE dtu.document_id = {}
                GROUP BY c.currency ORDER BY amount DESC limit 1;'''.format(doc.pk)
            with connection.cursor() as cursor:
                cursor.execute(max_currency_sql)
                max_currency = dictfetchone(cursor)

                values['max_currency'] = max_currency
                values['max_currency_name'] = max_currency['currency'] if max_currency else None
                values['max_currency_amount'] = max_currency['amount'] if max_currency else None

        return values

Example: __init__.py
Copyright Apache License 2.0
Author : BigBrotherTrade
def calc_history_signal(inst: Instrument, day: datetime.datetime, strategy: Strategy):
    his_break_n = strategy.param_set.get(code='BreakPeriod').int_value
    his_atr_n = strategy.param_set.get(code='AtrPeriod').int_value
    his_long_n = strategy.param_set.get(code='LongPeriod').int_value
    his_short_n = strategy.param_set.get(code='ShortPeriod').int_value
    his_stop_n = strategy.param_set.get(code='StopLoss').int_value
    df = to_df(MainBar.objects.filter(
        time__lte=day.date(),
        exchange=inst.exchange, product_code=inst.product_code).order_by('time').values_list(
        'time', 'open', 'high', 'low', 'close', 'settlement'))
    df.index = pd.DatetimeIndex(df.time)
    df['atr'] = ATR(df, timeperiod=his_atr_n)
    df['short_trend'] = df.close
    df['long_trend'] = df.close
    for idx in range(1, df.shape[0]):
        df.ix[idx, 'short_trend'] = (df.ix[idx-1, 'short_trend'] * (his_short_n - 1) +
                                     df.ix[idx, 'close']) / his_short_n
        df.ix[idx, 'long_trend'] = (df.ix[idx-1, 'long_trend'] * (his_long_n - 1) +
                                    df.ix[idx, 'close']) / his_long_n
    df['high_line'] = df.close.rolling(window=his_break_n).max()
    df['low_line'] = df.close.rolling(window=his_break_n).min()
    cur_pos = 0
    last_trade = None
    for cur_idx in range(his_break_n+1, df.shape[0]):
        idx = cur_idx - 1
        cur_date = df.index[cur_idx].to_pydatetime().replace(tzinfo=pytz.FixedOffset(480))
        prev_date = df.index[idx].to_pydatetime().replace(tzinfo=pytz.FixedOffset(480))
        if cur_pos == 0:
            if df.short_trend[idx] > df.long_trend[idx] and df.close[idx] > df.high_line[idx-1]:
                new_bar = MainBar.objects.filter(
                    exchange=inst.exchange, product_code=inst.product_code, time=cur_date).first()
                Signal.objects.create(
                    code=new_bar.code, trigger_value=df.atr[idx],
                    strategy=strategy, instrument=inst, type=SignalType.BUY, processed=True,
                    trigger_time=prev_date, price=new_bar.open, volume=1, priority=PriorityType.LOW)
                last_trade = Trade.objects.create(
                    broker=strategy.broker, strategy=strategy, instrument=inst,
                    code=new_bar.code, direction=DirectionType.LONG,
                    open_time=cur_date, shares=1, filled_shares=1, avg_entry_price=new_bar.open)
                cur_pos = cur_idx
            elif df.short_trend[idx] < df.long_trend[idx] and df.close[idx] < df.low_line[idx-1]:
                new_bar = MainBar.objects.filter(
                    exchange=inst.exchange, product_code=inst.product_code,
                    time=df.index[cur_idx].to_pydatetime().date()).first()
                Signal.objects.create(
                    code=new_bar.code, trigger_value=df.atr[idx],
                    strategy=strategy, instrument=inst, type=SignalType.SELL_SHORT, processed=True,
                    trigger_time=prev_date, price=new_bar.open, volume=1, priority=PriorityType.LOW)
                last_trade = Trade.objects.create(
                    broker=strategy.broker, strategy=strategy, instrument=inst,
                    code=new_bar.code, direction=DirectionType.SHORT,
                    open_time=cur_date, shares=1, filled_shares=1, avg_entry_price=new_bar.open)
                cur_pos = cur_idx * -1
        elif cur_pos > 0 and prev_date > last_trade.open_time:
            hh = float(MainBar.objects.filter(
                exchange=inst.exchange, product_code=inst.product_code,
                time__gte=last_trade.open_time,
                time__lt=prev_date).aggregate(Max('high'))['high__max'])
            if df.close[idx] <= hh - df.atr[cur_pos-1] * his_stop_n:
                new_bar = MainBar.objects.filter(
                    exchange=inst.exchange, product_code=inst.product_code,
                    time=cur_date.date()).first()
                Signal.objects.create(
                    strategy=strategy, instrument=inst, type=SignalType.SELL, processed=True,
                    code=new_bar.code,
                    trigger_time=prev_date, price=new_bar.open, volume=1, priority=PriorityType.LOW)
                last_trade.avg_exit_price = new_bar.open
                last_trade.close_time = cur_date
                last_trade.closed_shares = 1
                last_trade.profit = (new_bar.open - last_trade.avg_entry_price) * inst.volume_multiple
                last_trade.save()
                cur_pos = 0
        elif cur_pos < 0 and prev_date > last_trade.open_time:
            ll = float(MainBar.objects.filter(
                exchange=inst.exchange, product_code=inst.product_code,
                time__gte=last_trade.open_time,
                time__lt=prev_date).aggregate(Min('low'))['low__min'])
            if df.close[idx] >= ll + df.atr[-cur_pos - 1] * his_stop_n:
                new_bar = MainBar.objects.filter(
                    exchange=inst.exchange, product_code=inst.product_code,
                    time=cur_date.date()).first()
                Signal.objects.create(
                    code=new_bar.code,
                    strategy=strategy, instrument=inst, type=SignalType.BUY_COVER, processed=True,
                    trigger_time=prev_date, price=new_bar.open, volume=1, priority=PriorityType.LOW)
                last_trade.avg_exit_price = new_bar.open
                last_trade.close_time = cur_date
                last_trade.closed_shares = 1
                last_trade.profit = (last_trade.avg_entry_price - new_bar.open) * inst.volume_multiple
                last_trade.save()
                cur_pos = 0
        if cur_pos != 0 and cur_date.date() == day.date():
            last_trade.avg_exit_price = df.open[cur_idx]
            last_trade.close_time = cur_date
            last_trade.closed_shares = 1
            # Profit sign must match the stop-loss exits above:
            # exit minus entry for longs, entry minus exit for shorts.
            if last_trade.direction == DirectionType.LONG:
                last_trade.profit = (Decimal(df.open[cur_idx]) - last_trade.avg_entry_price) * \
                                    inst.volume_multiple
            else:
                last_trade.profit = (last_trade.avg_entry_price - Decimal(df.open[cur_idx])) * \
                                    inst.volume_multiple
            last_trade.save()
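
The short_trend/long_trend recursion near the top of this example is a Wilder-style smoothed average: each new value weighs the previous trend by (n - 1)/n and the new close by 1/n. As a vectorized sketch, assuming a pandas version with Series.ewm (the recurrence matches ewm with alpha=1/n and adjust=False):

    # y[0] = close[0]; y[t] = (y[t-1] * (n - 1) + close[t]) / n
    df['short_trend'] = df.close.ewm(alpha=1.0 / his_short_n, adjust=False).mean()
    df['long_trend'] = df.close.ewm(alpha=1.0 / his_long_n, adjust=False).mean()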

0 View Complete Implementation : views.py
Copyright GNU Affero General Public License v3.0
Author : mimblewimble
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)

        if Block.objects.exists():
            context["highest_block"] = Block.objects.order_by("height").last()
            context["latest_block"] = Block.objects.order_by(
                "timestamp").last()
            context["total_emission"] = Block.objects.order_by(
                "total_difficulty").last().height * 60

            context["competing_chains"] = Block.objects \
                                               .filter(height__gte=context["highest_block"].height - 60) \
                                               .values("height") \
                                               .annotate(cnt=Count("height")) \
                                               .aggregate(Max("cnt"))["cnt__max"]
            context["forked_at"] = Block.objects \
                                        .filter(height__gte=context["highest_block"].height - 60) \
                                        .values("height") \
                                        .annotate(cnt=Count("height")) \
                                        .filter(cnt__gt=1) \
                                        .aggregate(Min("height"))["height__min"]

            context['thumb_chart_list'] = [
                self.get_block_chart(), self.get_fee_chart()]

        return context
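
Both chain statistics above rebuild the same filtered-and-annotated queryset; since querysets are lazy, the base can be shared. A sketch of the same logic with the duplication factored out:

    recent_heights = Block.objects \
                          .filter(height__gte=context["highest_block"].height - 60) \
                          .values("height") \
                          .annotate(cnt=Count("height"))
    context["competing_chains"] = recent_heights.aggregate(Max("cnt"))["cnt__max"]
    context["forked_at"] = recent_heights.filter(cnt__gt=1) \
                                         .aggregate(Min("height"))["height__min"]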

0 View Complete Implementation : views.py
Copyright MIT License
Author : owocki
@staff_member_required
def nn_chart_view(request):

    # setup
    symbol = request.GET.get('symbol', 'BTC_ETH')
    i = 0
    charts = []
    chartnames = []
    metas = []
    symbols = Price.objects.values('symbol').distinct().order_by('symbol').values_list('symbol', flat=True)

    # get data
    pts, symbols_that_exist = get_data(request, symbol)
    if len(pts) == 0:
        return render_to_response('notfound.html')

    trainer_last_seen = None
    try:
        last_pt = PredictionTest.objects.filter(type='mock').order_by('-created_on').first()
        is_trainer_running = last_pt.created_on > (get_time() - datetime.timedelta(minutes=int(15)))
        trainer_last_seen = (last_pt.created_on - datetime.timedelta(hours=int(7))).strftime('%a %H:%M')
    except Exception:
        is_trainer_running = False

    meta = {
        'count': int(round(pts.count(), 0)),
        'avg': round(pts.aggregate(Avg('percent_correct'))['percent_correct__avg'], 0),
        'median': round(median_value(pts, 'percent_correct'), 0),
        'max': round(pts.aggregate(Max('percent_correct'))['percent_correct__max'], 0),
        'min': round(pts.aggregate(Min('percent_correct'))['percent_correct__min'], 0),
    }

    # get global chart information
    for parameter in ['percent_correct', 'profitloss_int']:
        i = i + 1
        cht = get_line_chart(pts, symbol, parameter)
        charts.append(cht)
        options = []
        chartnames.append("container"+str(i))
        metas.append({
            'name': parameter,
            'container_class': 'show',
            'class': "container"+str(i),
            'options': options,
        })

    # get parameter distribution charts
    parameters = ['datasetinputs', 'hiddenneurons', 'granularity', 'minutes_back', 'epochs', 'learningrate',
                  'momentum', 'weightdecay', 'bias_chart', 'recurrent_chart',
                  'timedelta_back_in_granularity_increments', 'time', 'prediction_size']
    for x_axis in parameters:
        i = i + 1
        cht = get_scatter_chart(pts, x_axis, symbol)
        charts.append(cht)
        options_dict = pts.values(x_axis).annotate(Avg('percent_correct')).annotate(Count('pk'))
        options = [(x_axis, obj[x_axis], int(round(obj['percent_correct__avg'], 0)),
                    int(round(obj['pk__count'], 0))) for obj in options_dict]
        options.sort(key=lambda x: x[1])
        the_max = max([option[2] for option in options])
        for k in range(len(options)):
            options[k] = options[k] + (("max" if options[k][2] == the_max else "notmax") +
                                       " " + ("warning" if options[k][3] < 5 else "nowarning"),)
        chartnames.append("container"+str(i))
        metas.append({
            'name': x_axis,
            'container_class': 'show' if len(options) > 1 else 'noshow',
            'class': "container"+str(i),
            'options': options,
        })

    # Step 3: Send the chart object to the template.
    return render_to_response('chart.html', {
        'pts': pts.order_by('percent_correct'),
        'ticker': symbol,
        'symbols': symbols,
        'meta': meta,
        'days_ago': [1, 2, 3, 4, 5, 10, 15, 30],
        'hours_ago': [1, 2, 3, 6, 12, 24],
        'getparams': getify(request.GET),
        'charts': charts,
        'metas': metas,
        'chartnames': chartnames,
        'chartnamesstr': ",".join(chartnames),
        'is_trainer_running': is_trainer_running,
        'trainer_last_seen': trainer_last_seen,
        'symbols_that_exist': symbols_that_exist,
    })
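
The meta block in this view issues a separate query per aggregate; Django can compute several aggregates in one round trip. A sketch of the same summary with a single aggregate() call (the median still needs the helper):

    agg = pts.aggregate(Avg('percent_correct'), Max('percent_correct'), Min('percent_correct'))
    meta = {
        'count': pts.count(),
        'avg': round(agg['percent_correct__avg'], 0),
        'median': round(median_value(pts, 'percent_correct'), 0),
        'max': round(agg['percent_correct__max'], 0),
        'min': round(agg['percent_correct__min'], 0),
    }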

0 View Complete Implementation : measuresummary.py
Copyright Mozilla Public License 2.0
Author : mozilla
def get_measure_summary(application_name, platform_name, channel_name, measure_name):
    '''
    Returns a data structure summarizing the "current" status of a measure

    A dictionary with a summary of the current median result over the last
    24 hours, compared to previous versions.
    '''
    current_version = get_current_firefox_version(channel_name, application_name)
    current_major_version = get_major_version(current_version)

    # if we are on esr, we will look up to 7 versions back
    # all other channels, just 2
    if channel_name == 'esr':
        min_version = current_major_version - 7
    else:
        min_version = current_major_version - MEASURE_SUMMARY_VERSION_INTERVAL

    builds = Build.objects.filter(application__name=application_name,
                                  channel__name=channel_name,
                                  platform__name=platform_name,
                                  version__gte=min_version,
                                  version__lt=str(current_major_version + 1))
    measure = Measure.objects.get(name=measure_name,
                                  application__name=application_name,
                                  platform__name=platform_name)
    datums = Datum.objects.filter(build__in=builds,
                                  measure=measure)

    raw_version_data = sorted(
        datums.values_list('build__version').distinct().annotate(
            Min('timestamp'), Max('timestamp')
        ), key=lambda d: parse_version(d[0]))
    if not raw_version_data:
        return None

    # group versions by major version -- we'll aggregate
    grouped_versions = {}
    for (version, min_timestamp, max_timestamp) in raw_version_data:
        major_version = get_major_version(version)
        if not grouped_versions.get(major_version):
            grouped_versions[major_version] = []
        grouped_versions[major_version].append(
            (version, min_timestamp, max_timestamp))

    major_versions = []
    for major_version in sorted(grouped_versions.keys()):
        subversions = sorted(grouped_versions[major_version], key=lambda d: parse_version(d[0]))
        major_versions.append((str(major_version), subversions[0][1], subversions[-1][2]))

    latest_major_version_interval = major_versions[-1][2] - major_versions[-1][1]

    def _get_version_summaries(version_data, reference_duration):
        version_summaries = []
        # this somewhat confusing bit of code tries to determine the end of each major
        # version programmatically by looking at when the next one started (we take the
        # latest major version and the latest version at face value -- i.e. what we
        # see in the data)
        for (version, next_version) in zip(version_data, version_data[1:] + [None]):
            version_start = version[1]

            if next_version is not None:
                version_end = next_version[1]
            else:
                version_end = version[2]

            field_duration = version_end - version_start
            adjusted_duration = reference_duration or field_duration

            # if we can, ignore the first 24 hours as it tends to be very noisy
            # (we only want to do this if the latest version has been out for more than 24 hours)
            if ((latest_major_version_interval and
                latest_major_version_interval > datetime.timedelta(days=1)) and
                field_duration > datetime.timedelta(days=2)):
                version_start = version[1] + datetime.timedelta(days=1)

            if version_start >= version_end:
                # this version was either super-short lived or we have truncated
                # data set for it, in either case we shouldn't pretend that we can
                # provide a valid summary of it
                continue
            else:
                version_summary = {
                    'version': version[0],
                    'fieldDuration': int(field_duration.total_seconds())
                }
                for (rate_id, count_id, interval) in (
                        ('rate', 'count', field_duration),
                        ('adjustedRate', 'adjustedCount', adjusted_duration)
                ):
                    values = _get_data_interval_for_version(builds, measure,
                                                            version[0],
                                                            version_start,
                                                            version_start + interval)
                    if not values:
                        # in rare cases (mostly during backfilling) we might not
                        # have any actual data for the version in question in the
                        # interval we want
                        continue
                    raw_count = int(sum([v[1] for v in values]))
                    version_summary[count_id] = raw_count

                    # to prevent outliers from impacting our rate calculation, we'll use
                    # the 99.9th percentile of captured values for calculating the rate
                    end = math.ceil(len(values) * 0.999)
                    rate_values = values[:end]
                    version_summary[rate_id] = round(
                        sum([v[1] for v in rate_values]) /
                        sum([v[2]/1000.0 for v in rate_values]), 2)

                version_summaries.append(version_summary)

        return version_summaries

    version_summaries = _get_version_summaries(major_versions,
                                               latest_major_version_interval)

    # we tack the last few versions on at the end just to give people an idea of
    # what's happening with the last few point releases / betas (except on
    # nightly where we basically have a continuous flow of new releases)
    if channel_name != 'nightly':
        recent_point_releases = [raw_version_datum for raw_version_datum in
                                 raw_version_data[-3:] if
                                 get_major_version(raw_version_datum[0]) ==
                                 current_major_version]
        version_summaries.extend(_get_version_summaries(recent_point_releases,
                                                        None))

    if not version_summaries:
        return None

    return {
        "versions": list(reversed(version_summaries)),
        "lastUpdated": datums.aggregate(Max('timestamp'))['timestamp__max']
    }
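
The major-version grouping loop in this function can be written a little more tightly with collections.defaultdict; a behavior-equivalent sketch:

    from collections import defaultdict

    grouped_versions = defaultdict(list)
    for version, min_timestamp, max_timestamp in raw_version_data:
        grouped_versions[get_major_version(version)].append(
            (version, min_timestamp, max_timestamp))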

0 View Complete Implementation : query_parsing.py
Copyright GNU General Public License v3.0
Author : CalthorpeAnalytics
def annotated_related_feature_class_pk_via_geographies(manager, config_entity, db_entity_keys):
    """
        Joins a related model by geographic join, annotating each db_entity_key
        with the minimum pk of the related feature
    """
    from footprint.main.models.feature.feature_class_creator import FeatureClassCreator
    feature_class_creator = FeatureClassCreator.from_dynamic_model_class(manager.model)

    def resolve_related_model_pk(db_entity_key):
        related_model = config_entity.db_entity_feature_class(db_entity_key)
        # The common Geography class
        geography_class = feature_class_creator.common_geography_class(related_model)
        geography_scope = feature_class_creator.common_geography_scope(related_model)
        logger.warning("Resolved geography scope %s", geography_scope)
        # Find the geographies ManyToMany field that relates this model to the related_model
        # via a Geography class. Which geography class depends on their common geography scope
        geographies_field = feature_class_creator.geographies_field(geography_scope)
        try:
            # Find the queryable field name from the geography class to the related model
            related_model_geographies_field_name = resolve_queryable_name_of_type(geography_class, related_model)
        except Exception:
            # Sometimes the geography class hasn't had its fields cached properly. Fix here
            clear_many_cache(geography_class)
            related_model_geographies_field_name = resolve_queryable_name_of_type(geography_class, related_model)

        return '%s__%s__pk' % (geographies_field.name, related_model_geographies_field_name)

    pk_paths = map_to_dict(
        lambda db_entity_key: [db_entity_key, Min(resolve_related_model_pk(db_entity_key))],
        db_entity_keys)

    return manager.annotate(**pk_paths)
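
map_to_dict is a project helper not shown here; assuming it builds a dict from the [key, value] pairs returned by the lambda, the final annotation step is equivalent to this sketch:

    pk_paths = {key: Min(resolve_related_model_pk(key)) for key in db_entity_keys}
    return manager.annotate(**pk_paths)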

0 View Complete Implementation : interruptions.py
Copyright MIT License
Author : diegojromerolopez
def _number_of_interruptions_by_member(current_user, chart_title, interruption_measurement, incremental=False):
    # Caching
    chart_uuid = "interruptions.{0}".format(
        hashlib.sha256("_number_of_interruptions_by_member-{0}-{1}-{2}".format(
            current_user.id,
            inspect.getsource(interruption_measurement),
            "incremental" if incremental else "absolute"
        )).hexdigest()
    )
    chart = CachedChart.get(board=None, uuid=chart_uuid)
    if chart:
        return chart

    interruptions_chart = pygal.Line(title=chart_title, legend_at_bottom=True, print_values=True,
                                     print_zeroes=False, x_label_rotation=65,
                                     human_readable=True)

    if incremental:
        datetime_filter = "datetime__date__lte"
        interruptions_chart.print_values = False
    else:
        datetime_filter = "datetime__date"

    interruptions_filter = {}

    boards = get_user_boards(current_user)
    members = Member.objects.filter(boards__in=boards).distinct()
    member_values = {member.id: [] for member in members}
    interruptions_filter["member__in"] = members

    interruptions = Interruption.objects.filter(**interruptions_filter).order_by("datetime")
    if not interruptions.exists():
        return interruptions_chart.render_django_response()

    min_datetime = interruptions.aggregate(min_datetime=Min("datetime"))["min_datetime"]
    max_datetime = interruptions.aggregate(max_datetime=Max("datetime"))["max_datetime"]

    date_i = copy.deepcopy(min_datetime.date())
    max_date = max_datetime.date() + timedelta(days=2)
    days = []
    num_interruptions = []
    while date_i <= max_date:
        interruptions_filter = {datetime_filter: date_i}
        interruptions_on_date = interruptions.filter(**interruptions_filter)
        interruptions_on_date_value = interruption_measurement(interruptions_on_date)

        # Add only values when there is some interruption in any project
        if interruptions_on_date_value > 0:
            days.append(date_i.strftime("%Y-%m-%d"))
            num_interruptions.append(interruptions_on_date_value)

            for member_i in members:
                member_interruptions_on_date_value = interruption_measurement(interruptions_on_date.filter(member=member_i))
                member_values[member_i.id].append(member_interruptions_on_date_value)

        date_i += timedelta(days=1)

    interruptions_chart.add(u"All interruptions", num_interruptions)
    for member_i in members:
        if sum(member_values[member_i.id]) > 0:
            interruptions_chart.add(member_i.external_username, member_values[member_i.id])

    interruptions_chart.x_labels = days

    chart = CachedChart.make(board=None, uuid=chart_uuid, svg=interruptions_chart.render(is_unicode=True))
    return chart.render_django_response()
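
The min/max bounds in this function are fetched with two separate aggregate() calls; a single call computes both in one query. A sketch:

    bounds = interruptions.aggregate(min_datetime=Min("datetime"),
                                     max_datetime=Max("datetime"))
    min_datetime = bounds["min_datetime"]
    max_datetime = bounds["max_datetime"]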

0 View Complete Implementation : views.py
Copyright MIT License
Author : owocki
@staff_member_required
def c_chart_view(request):

    # setup
    symbol = request.GET.get('symbol', 'BTC_ETH')
    i = 0
    charts = []
    chartnames = []
    metas = []
    symbols = Price.objects.values('symbol').distinct().order_by('symbol').values_list('symbol', flat=True)

    # get data
    pts, symbols_that_exist = get_data(request, symbol, 'history_classifiertest', ClassifierTest)

    if len(pts) == 0:
        return render_to_response('notfound.html')

    trainer_last_seen = None
    try:
        last_pt = ClassifierTest.objects.filter(type='mock').order_by('-created_on').first()
        is_trainer_running = last_pt.created_on > (get_time() - datetime.timedelta(minutes=int(15)))
        trainer_last_seen = (last_pt.created_on - datetime.timedelta(hours=int(7))).strftime('%a %H:%M')
    except Exception:
        is_trainer_running = False

    meta = {
        'count': int(round(pts.count(), 0)),
        'avg': round(pts.aggregate(Avg('percent_correct'))['percent_correct__avg'], 0),
        'median': round(median_value(pts, 'percent_correct'), 0),
        'max': round(pts.aggregate(Max('percent_correct'))['percent_correct__max'], 0),
        'min': round(pts.aggregate(Min('percent_correct'))['percent_correct__min'], 0),
    }

    # get global chart information
    for parameter in ['percent_correct', 'score']:
        i = i + 1
        cht = get_line_chart(pts, symbol, parameter)
        charts.append(cht)
        options = []
        chartnames.append("container"+str(i))
        metas.append({
            'name': parameter,
            'container_class': 'show',
            'class': "container"+str(i),
            'options': options,
        })

    # get parameter distribution charts
    parameters = ['name', 'datasetinputs', 'granularity', 'minutes_back',
                  'timedelta_back_in_granularity_increments', 'time', 'prediction_size']
    for x_axis in parameters:
        i = i + 1
        cht = get_scatter_chart(pts, x_axis, symbol)
        charts.append(cht)
        options_dict = pts.values(x_axis).annotate(Avg('percent_correct')).annotate(Count('pk'))
        options = [(x_axis, obj[x_axis], int(round(obj['percent_correct__avg'], 0)),
                    int(round(obj['pk__count'], 0))) for obj in options_dict]
        options.sort(key=lambda x: x[1])
        the_max = max([option[2] for option in options])
        for k in range(len(options)):
            options[k] = options[k] + (("max" if options[k][2] == the_max else "notmax") +
                                       " " + ("warning" if options[k][3] < 5 else "nowarning"),)
        chartnames.append("container"+str(i))
        metas.append({
            'name': x_axis,
            'container_class': 'show' if len(options) > 1 else 'noshow',
            'class': "container"+str(i),
            'options': options,
        })

    # Step 3: Send the chart object to the template.
    return render_to_response('c_chart.html', {
        'pts': pts.order_by('percent_correct'),
        'ticker': symbol,
        'symbols': symbols,
        'meta': meta,
        'days_ago': [1, 2, 3, 4, 5, 10, 15, 30],
        'hours_ago': [1, 2, 3, 6, 12, 24],
        'getparams': getify(request.GET),
        'charts': charts,
        'metas': metas,
        'chartnames': chartnames,
        'chartnamesstr': ",".join(chartnames),
        'is_trainer_running': is_trainer_running,
        'trainer_last_seen': trainer_last_seen,
        'symbols_that_exist': symbols_that_exist,
    })

0 View Complete Implementation : interruptions.py
Copyright MIT License
Author : diegojromerolopez
def _interruption_measurement_by_month(current_user, chart_title, interruption_measurement, board=None):

    chart_uuid = "interruptions.{0}".format(
        hashlib.sha256("_interruption_measurement_by_month-{0}-{1}-{2}".format(
            current_user.id,
            inspect.getsource(interruption_measurement),
            board.id if board else "username-{0}".format(current_user.id)
        )).hexdigest()
    )
    chart = CachedChart.get(board=board, uuid=chart_uuid)
    if chart:
        return chart

    if board:
        chart_title += u" for board {0} as of {1}".format(board.name, board.get_human_fetch_datetime())

    interruptions_filter = {}
    if board:
        interruptions_filter["board"] = board
    else:
        interruptions_filter["member__in"] = Member.get_user_team_members(current_user)

    interruptions = Interruption.objects.filter(**interruptions_filter).order_by("datetime")

    interruptions_chart = pygal.Line(title=chart_title, legend_at_bottom=True, print_values=True,
                                     print_zeroes=False, human_readable=True)

    min_datetime = interruptions.aggregate(min_datetime=Min("datetime"))["min_datetime"]
    max_datetime = interruptions.aggregate(max_datetime=Max("datetime"))["max_datetime"]
    if min_datetime is None or max_datetime is None:
        return interruptions_chart.render_django_response()

    datetime_i = copy.deepcopy(min_datetime)

    date_i = datetime_i.date()
    month_i = date_i.month
    year_i = date_i.year

    last_month = max_datetime.month
    last_year = max_datetime.year

    if board is None:
        boards = get_user_boards(current_user)
    else:
        boards = [board]

    months = []
    values = []
    board_values = {board.id: [] for board in boards}
    has_board_values = {board.id: False for board in boards}

    while year_i < last_year or (year_i == last_year and month_i <= last_month):
        monthly_interruptions = interruptions.filter(datetime__month=month_i, datetime__year=year_i)
        monthly_measurement = interruption_measurement(monthly_interruptions)
        # For each month that have some data, add it to the chart
        if monthly_measurement > 0:
            months.append(u"{0}-{1}".format(year_i, month_i))
            values.append(monthly_measurement)
            for board in boards:
                monthly_interruption_measurement = interruption_measurement(monthly_interruptions.filter(board=board))
                board_values[board.id].append(monthly_interruption_measurement)
                if monthly_interruption_measurement > 0:
                    has_board_values[board.id] = True

        month_i += 1
        if month_i > 12:
            month_i = 1
            year_i += 1

    interruptions_chart.x_labels = months
    interruptions_chart.add(u"All interruptions", values)
    for board in boards:
        if has_board_values[board.id]:
            interruptions_chart.add(board.name, board_values[board.id])

    # Store under the same board key used by CachedChart.get above, or the cache never hits
    chart = CachedChart.make(board=board, uuid=chart_uuid, svg=interruptions_chart.render(is_unicode=True))
    return chart.render_django_response()

0 View Complete Implementation : labels.py
Copyright MIT License
Author : diegojromerolopez
def _daily_spent_times_by_period(current_user, board=None, time_measurement="spent_time", operation="Avg", period="month"):

    # Caching
    chart_uuid = "labels._daily_spent_times_by_period-{0}-{1}-{2}-{3}".format(
        board.id if board else "user-{0}".format(current_user.id), time_measurement, operation, period
    )
    chart = CachedChart.get(board=board, uuid=chart_uuid)
    if chart:
        return chart

    daily_spent_time_filter = {"{0}__gt".format(time_measurement): 0}
    last_activity_datetime = timezone.now()
    if board:
        last_activity_datetime = board.last_activity_datetime
        daily_spent_time_filter["board"] = board

    if operation == "Avg":
        chart_satle = u"Task average {1} as of {0}".format(last_activity_datetime, time_measurement.replace("_", " "))
        if board:
            chart_satle += u" for board {0} (fetched on {1})".format(board.name, board.get_human_fetch_datetime())
    elif operation == "Count":
        chart_satle = u"Tasks worked on as of {0}".format(last_activity_datetime)
        if board:
            chart_satle += u" for board {0} (fetched on {1})".format(board.name, board.get_human_fetch_datetime())
    else:
        raise ValueError(u"Operation not valid only Avg and Count values are valid")

    period_measurement_chart = pygal.StackedBar(satle=chart_satle, legend_at_bottom=True, print_values=True,
                                                print_zeroes=False, x_label_rotation=45,
                                                human_readable=True)
    labels = []
    if board:
        labels = board.labels.all()

    end_date = DailySpentTime.objects.filter(**daily_spent_time_filter).aggregate(max_date=Max("date"))["max_date"]
    date_i = DailySpentTime.objects.filter(**daily_spent_time_filter).aggregate(min_date=Min("date"))["min_date"]

    if date_i is None or end_date is None:
        return period_measurement_chart.render_django_response()

    month_i = date_i.month
    week_i = get_iso_week_of_year(date_i)
    year_i = date_i.year

    if operation == "Avg":
        aggregation = Avg
    elif operation == "Count":
        aggregation = Count
    else:
        ValueError(u"Operation not valid only Avg and Count values are valid")

    measurement_titles = []
    measurement_values = []

    label_measurement_titles = {label.id: [] for label in labels}
    label_measurement_values = {label.id: [] for label in labels}

    end_loop = False
    while not end_loop:
        if period == "month":
            period_filter = {"date__month": month_i, "date__year": year_i}
            measurement_satle = u"{0}-{1}".format(year_i, month_i)
            label_measurement_satle_suffix = u"{0}-{1}".format(year_i, month_i)
            end_loop = datetime.datetime.strptime('{0}-{1}-1'.format(year_i, month_i), '%Y-%m-%d').date() > end_date
        elif period == "week":
            period_filter = {"week_of_year": week_i, "date__year": year_i}
            measurement_satle = u"{0}W{1}".format(year_i, week_i)
            label_measurement_satle_suffix = u"{0}W{1}".format(year_i, week_i)
            end_loop = start_of_week_of_year(week=week_i, year=year_i) > end_date
        else:
            raise ValueError(u"Period {0} not valid. Only 'month' or 'week' is valid".format(period))

        period_times = DailySpentTime.objects.filter(**daily_spent_time_filter).\
            filter(**period_filter)

        period_measurement = period_times.aggregate(measurement=aggregation(time_measurement))["measurement"]
        # For each month that have some data, add it to the chart
        if period_measurement is not None and period_measurement > 0:
            measurement_titles.append(measurement_title)
            measurement_values.append(period_measurement)

            # For each label that has a name (i.e. it is being used) and has a value, store its measurement per label
            for label in labels:
                if label.name:
                    label_measurement = period_times.filter(card__labels=label).\
                                            aggregate(measurement=aggregation(time_measurement))["measurement"]
                    if label_measurement:
                        label_measurement_titles[label.id].append(measurement_title)
                        label_measurement_values[label.id].append(label_measurement)

        if period == "month":
            month_i += 1
            if month_i > 12:
                month_i = 1
                year_i += 1

        elif period == "week":
            week_i += 1
            if week_i > number_of_weeks_of_year(year_i):
                week_i = 1
                year_i += 1

    # Periods (weeks or months) that have any measurement
    period_measurement_chart.x_labels = measurement_titles
    period_measurement_chart.add("Tasks", measurement_values)

    # For each label that has any measurement, add its measurement to the chart
    for label in labels:
        if sum(label_measurement_values[label.id]) > 0:
            period_measurement_chart.add(label.name, label_measurement_values[label.id])

    chart = CachedChart.make(board=board, uuid=chart_uuid, svg=period_measurement_chart.render(is_unicode=True))
    return chart.render_django_response()

0 View Complete Implementation : views.py
Copyright MIT License
Author : openstates
def bill_qs(include_votes):
    qs = (
        Bill.objects.annotate(
            last_action=Max("actions__date"), first_action=Min("actions__date")
        )
        .select_related("legislative_session__jurisdiction", "from_organization")
        .prefetch_related(
            "docameents__links",
            "versions__links",
            "actions__organization",
            "abstracts",
            "sources",
            "sponsorships",
            "other_satles",
            "legacy_mapping",
        )
    )
    if include_votes:
        qs = qs.prefetch_related(
            "votes__counts",
            "votes__votes",
            "votes__sources",
            "votes__votes__voter",
            "votes__legislative_session",
            "votes__organization",
        )
    return qs
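
A usage sketch for the annotated queryset; the session filter is hypothetical and the field names are assumed from the OCD-style schema:

    for bill in bill_qs(include_votes=False).filter(
            legislative_session__identifier="2019"):  # hypothetical session
        print(bill.identifier, bill.first_action, bill.last_action)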

0 View Complete Implementation : interruptions.py
Copyright MIT License
Author : diegojromerolopez
def _number_of_interruptions(current_user, board, chart_title, interruption_measurement, incremental=False):

    # Caching
    chart_uuid = "interruptions.{0}".format(
        hashlib.sha256("_number_of_interruptions-{0}-{1}-{2}-{3}-{4}".format(
            current_user.id,
            board.id if board else "user-{0}".format(current_user.id),
            inspect.getsource(interruption_measurement),
            "incremental" if incremental else "absolute",
            chart_title
        )).hexdigest()
    )
    chart = CachedChart.get(board=board, uuid=chart_uuid)
    if chart:
        return chart

    if board:
        chart_title += u" for board {0} as of {1}".format(board.name, board.get_human_fetch_datetime())

    interruptions_chart = pygal.Line(title=chart_title, legend_at_bottom=True, print_values=True,
                                     print_zeroes=False, x_label_rotation=65,
                                     human_readable=True)

    if incremental:
        datetime_filter = "datetime__date__lte"
        interruptions_chart.print_values = False
    else:
        datetime_filter = "datetime__date"

    interruptions_filter = {}
    if board:
        interruptions_filter["board"] = board
        boards = [board]
    else:
        boards = get_user_boards(current_user)
        interruptions_filter["member__in"] = Member.get_user_team_members(current_user)

    board_values = {board.id: [] for board in boards}

    interruptions = Interruption.objects.filter(**interruptions_filter).order_by("datetime")
    if not interruptions.exists():
        return interruptions_chart.render_django_response()

    min_datetime = interruptions.aggregate(min_datetime=Min("datetime"))["min_datetime"]
    max_datetime = interruptions.aggregate(max_datetime=Max("datetime"))["max_datetime"]

    date_i = copy.deepcopy(min_datetime.date())
    max_date = max_datetime.date() + timedelta(days=2)
    days = []
    num_interruptions = []
    while date_i <= max_date:
        interruptions_filter = {datetime_filter: date_i}
        interruptions_on_date = interruptions.filter(**interruptions_filter)
        interruptions_on_date_value = interruption_measurement(interruptions_on_date)

        # Add only values when there is some interruption in any project
        if interruptions_on_date_value > 0:
            days.append(date_i.strftime("%Y-%m-%d"))
            num_interruptions.append(interruptions_on_date_value)

            if board is None:
                for board_i in boards:
                    board_i_interruptions_value = interruption_measurement(interruptions_on_date.filter(board=board_i))
                    board_values[board_i.id].append(board_i_interruptions_value)

        date_i += timedelta(days=1)

    interruptions_chart.add(u"All interruptions", num_interruptions)
    for board_i in boards:
        if sum(board_values[board_i.id]) > 0:
            interruptions_chart.add(board_i.name, board_values[board_i.id])

    interruptions_chart.x_labels = days

    # Store under the same board key used by CachedChart.get above, or the cache never hits
    chart = CachedChart.make(board=board, uuid=chart_uuid, svg=interruptions_chart.render(is_unicode=True))
    return chart.render_django_response()

0 View Complete Implementation : noise_measurements.py
Copyright MIT License
Author : diegojromerolopez
def noise_level(current_user):

    # Caching
    chart_uuid = "noise_measurements.noise_level-{0}".format(current_user.id)
    chart = CachedChart.get(board=None, uuid=chart_uuid)
    if chart:
        return chart

    chart_satle = u"Average noise levels per day in db as of {0}".format(timezone.now())

    noise_measurement_filter = {"member__in": Member.get_user_team_members(current_user)}

    noise_measurements = NoiseMeasurement.objects.filter(**noise_measurement_filter).order_by("datetime")

    noise_chart = pygal.Line(title=chart_title, legend_at_bottom=True, print_values=True,
                             print_zeroes=False, x_label_rotation=65,
                             human_readable=True)

    start_datetime = noise_measurements.aggregate(min_date=Min("datetime"))["min_date"]
    end_datetime = noise_measurements.aggregate(max_date=Max("datetime"))["max_date"]
    if start_datetime is None or end_datetime is None:
        return noise_chart.render_django_response()

    end_date = end_datetime.date()

    noise_values = []
    days = []

    date_i = copy.deepcopy(start_datetime).date()
    while date_i <= end_date:
        date_noise_measurements = noise_measurements.filter(datetime__date=date_i)
        if date_noise_measurements.exists():
            noise_values.append(
                numpy.mean([noise_measurement.noise_level for noise_measurement in date_noise_measurements])
            )
            days.append(date_i.strftime("%Y-%m-%d"))

        date_i += timedelta(days=1)

    noise_chart.add("Average noise level by day", noise_values)
    noise_chart.x_labels = days

    chart = CachedChart.make(board=None, uuid=chart_uuid, svg=noise_chart.render(is_unicode=True))
    return chart.render_django_response()

0 View Complete Implementation : noise_measurements.py
Copyright MIT License
Author : diegojromerolopez
def noise_level_per_hour(current_user):
    # Caching
    chart_uuid = "noise_measurements.noise_level_per_hour-{0}".format(current_user.id)
    chart = CachedChart.get(board=None, uuid=chart_uuid)
    if chart:
        return chart

    chart_satle = u"Noise levels per hour in db as of {0}".format(timezone.now())

    noise_measurement_filter = {"member__in": Member.get_user_team_members(current_user)}

    noise_measurements = NoiseMeasurement.objects.filter(**noise_measurement_filter).order_by("datetime")

    noise_chart = pygal.Line(title=chart_title, legend_at_bottom=True, print_values=True,
                             print_zeroes=False, x_label_rotation=0,
                             human_readable=True)

    noise_values = {"avg": [], "min": [], "max": []}
    hours = []
    hour_i = 0
    while hour_i < 24:
        noise_level_in_hour_i = noise_measurements.\
            filter(datetime__hour=hour_i).\
            aggregate(avg=Avg("noise_level"), max=Max("noise_level"), min=Min("noise_level"))

        if noise_level_in_hour_i["avg"] is not None:
            noise_values["avg"].append(noise_level_in_hour_i["avg"])
            noise_values["min"].append(noise_level_in_hour_i["min"])
            noise_values["max"].append(noise_level_in_hour_i["max"])

            hours.append(hour_i)
        hour_i += 1

    noise_chart.add("Avg noise level", noise_values["avg"])
    noise_chart.add("Min noise level", noise_values["min"])
    noise_chart.add("Max noise level", noise_values["max"])
    noise_chart.x_labels = hours

    chart = CachedChart.make(board=None, uuid=chart_uuid, svg=noise_chart.render(is_unicode=True))
    return chart.render_django_response()

0 View Complete Implementation : noise_measurements.py
Copyright MIT License
Author : diegojromerolopez
def noise_level_per_weekday(current_user):
    # Caching
    chart_uuid = "noise_measurements.noise_level_per_weekday-{0}".format(current_user.id)
    chart = CachedChart.get(board=None, uuid=chart_uuid)
    if chart:
        return chart

    chart_satle = u"Noise levels per weekday in db as of {0}".format(timezone.now())

    noise_measurement_filter = {"member__in": Member.get_user_team_members(current_user)}

    noise_measurements = NoiseMeasurement.objects.filter(**noise_measurement_filter).order_by("datetime")

    noise_chart = pygal.Line(title=chart_title, legend_at_bottom=True, print_values=True,
                             print_zeroes=False, x_label_rotation=0,
                             human_readable=True)

    noise_values = {"avg": [], "min": [], "max": []}
    weekday_dict = {1: "Sunday", 2: "Monday", 3: "Tuesday", 4: "Wednesday", 5: "Thursday", 6: "Friday", 7: "Saturday"}
    weekdays = []
    weekday_i = 1
    while weekday_i <= 7:  # Django's week_day lookup runs from 1 (Sunday) to 7 (Saturday)
        noise_level_in_weekday_i = noise_measurements. \
            filter(datetime__week_day=weekday_i). \
            aggregate(avg=Avg("noise_level"), max=Max("noise_level"), min=Min("noise_level"))

        if noise_level_in_weekday_i["avg"] is not None:
            noise_values["avg"].append(noise_level_in_weekday_i["avg"])
            noise_values["min"].append(noise_level_in_weekday_i["min"])
            noise_values["max"].append(noise_level_in_weekday_i["max"])

            weekdays.append(weekday_dict[weekday_i])
        weekday_i += 1

    noise_chart.add("Avg noise level", noise_values["avg"])
    noise_chart.add("Min noise level", noise_values["min"])
    noise_chart.add("Max noise level", noise_values["max"])
    noise_chart.x_labels = weekdays

    chart = CachedChart.make(board=None, uuid=chart_uuid, svg=noise_chart.render(is_unicode=True))
    return chart.render_django_response()

0 View Complete Implementation : repositories.py
Copyright MIT License
Author : diegojromerolopez
def _number_of_code_errors_by_month(board, repository=None, language="python", per_loc=False):
    # Caching
    chart_uuid = "repositories._number_of_code_errors_by_month-{0}-{1}-{2}-{3}".format(
        board.id, repository.id if repository else "None", language, "per_loc" if per_loc else "global"
    )
    chart = CachedChart.get(board=board, uuid=chart_uuid)
    if chart:
        return chart

    repository_text = " "
    repository_filter = {}
    if repository:
        repository_filter = {"repository": repository}
        repository_text = u", repository {0}, ".format(repository.name)

    if not per_loc:
        chart_title = u"Errors in {0} code by month in project {1}{2}{3}".format(language, board.name, repository_text, timezone.now())
    else:
        chart_title = u"Errors in {0} code per LOC by month in project {1}{2}{3}".format(language, board.name, repository_text, timezone.now())

    def formatter(x):
        if per_loc:
            return '{0:.2f}'.format(x)
        return "{0}".format(x)

    chart = pygal.Line(title=chart_title, legend_at_bottom=True, print_values=True,
                       print_zeroes=False, value_formatter=formatter,
                       human_readable=False)

    if language.lower() == "php":
        error_messages = board.phpmd_messages.filter(commit__has_been_assessed=True).filter(**repository_filter)
        message_types = PhpMdMessage.RULESETS
        message_type_label = "ruleset"
    elif language.lower() == "python":
        error_messages = board.pylint_messages.filter(commit__has_been_assessed=True).filter(**repository_filter)
        message_types = dict(PylintMessage.TYPE_CHOICES).keys()
        message_type_label = "type"
    else:
        raise ValueError(u"Programming language {0} not recognized".format(language))

    project_locs = board.commit_files.filter(**repository_filter).aggregate(locs=Sum("lines_of_code"))["locs"]
    if project_locs is None:
        return chart.render_django_response()

    min_creation_datetime = board.commits.filter(has_been_assessed=True).filter(**repository_filter).aggregate(
        min_creation_datetime=Min("creation_datetime"))["min_creation_datetime"]
    if min_creation_datetime is None:
        return chart.render_django_response()

    max_creation_datetime = board.commits.filter(has_been_assessed=True).filter(**repository_filter).aggregate(
        max_creation_datetime=Max("creation_datetime"))["max_creation_datetime"]

    max_month_i = max_creation_datetime.month
    max_year_i = max_creation_datetime.year

    for message_type in message_types:
        month_i = min_creation_datetime.month
        year_i = min_creation_datetime.year
        number_of_messages_by_month = []
        chart.x_labels = []
        while year_i < max_year_i or (year_i == max_year_i and month_i <= max_month_i):
            error_message_filter = {
                message_type_label: message_type,
                "commit__has_been_astessed": True,
                "commit__creation_datetime__year": year_i, "commit__creation_datetime__month": month_i
            }
            month_i_messages = error_messages.filter(**error_message_filter)

            number_of_errors = month_i_messages.count()
            if per_loc:
                number_of_errors /= float(project_locs)
            number_of_messages_by_month.append(number_of_errors)
            chart.x_labels.append(u"{0}-{1}".format(year_i, month_i))

            month_i += 1
            if month_i > 12:
                month_i = 1
                year_i += 1

        chart.add(message_type, number_of_messages_by_month)

    chart = CachedChart.make(board=board, uuid=chart_uuid, svg=chart.render(is_unicode=True))
    return chart.render_django_response()
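
The month-by-month loop in this function runs one count query per month per message type. On newer Django (assuming >= 1.10 for TruncMonth), the same totals can come from a single grouped query; a sketch:

    from django.db.models import Count
    from django.db.models.functions import TruncMonth

    monthly_counts = (error_messages
                      .annotate(month=TruncMonth("commit__creation_datetime"))
                      .values(message_type_label, "month")
                      .annotate(n=Count("pk")))
    # one row per (message type, month), e.g. {'type': ..., 'month': ..., 'n': ...}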