@@ -415,9 +415,9 @@ def get_metadata_for_bucket(bucketid: str, release: str = None):
415415    try:
416416        if not release:
417417            # Get all columns up to "~" (non-inclusive)
418-            rows = BucketMetadata.objects.filter(key=bucketid, column1__lt="~").all()
418+            rows = BucketMetadata.objects.filter(key=bucketid.encode(), column1__lt="~").all()
419419        else:
420-            rows = BucketMetadata.objects.filter(key=bucketid).all()
420+            rows = BucketMetadata.objects.filter(key=bucketid.encode()).all()
421421
422422        ret = {}
423423        for row in rows:
@@ -426,6 +426,9 @@ def get_metadata_for_bucket(bucketid: str, release: str = None):
426426    if release and ret:
427427        try:
428428            ret["FirstSeen"] = ret["~%s:FirstSeen" % release]
429+        except KeyError:
430+            pass
431+        try:
429432            ret["LastSeen"] = ret["~%s:LastSeen" % release]
430433        except KeyError:
431434            pass
@@ -444,37 +447,7 @@ def chunks(l, n):
444447def get_metadata_for_buckets(bucketids, release=None):
445448    ret = dict()
446449    for bucketid in bucketids:
447-        bucket_key = bucketid.encode() if isinstance(bucketid, str) else bucketid
448-        try:
449-            if not release:
450-                rows = BucketMetadata.objects.filter(key=bucket_key, column1__lt="~").all()
451-            else:
452-                rows = BucketMetadata.objects.filter(key=bucket_key).all()
453-
454-            bucket_data = {}
455-            for row in rows:
456-                bucket_data[row.column1] = row.value
457-
458-            if bucket_data:
459-                ret[bucketid] = bucket_data
460-        except DoesNotExist:
461-            pass
462-
463-    if release:
464-        for bucket_id in ret:
465-            bucket = ret[bucket_id]
466-            try:
467-                bucket["FirstSeen"] = bucket["~%s:FirstSeen" % release]
468-                bucket["LastSeen"] = bucket["~%s:LastSeen" % release]
469-            except KeyError:
470-                # Rather than confuse developers with half release-specific
471-                # data. Of course this will only apply for the current row, so
472-                # it's possible subsequent rows will show release-specific
473-                # data.
474-                if "FirstSeen" in bucket:
475-                    del bucket["FirstSeen"]
476-                if "LastSeen" in bucket:
477-                    del bucket["LastSeen"]
450+        ret[bucketid] = get_metadata_for_bucket(bucketid, release)
478451    return ret
479452
480453
@@ -645,65 +618,75 @@ def get_package_crash_rate(
645618
646619    # the generic counter only includes Crashes for packages from official
647620    # Ubuntu sources and from systems not under auto testing
648-    old_vers_column = "%s:%s:%s" % (release, src_package, old_version)
649-    new_vers_column = "%s:%s:%s" % (release, src_package, new_version)
621+    old_vers_column = "oopses:Crash: %s:%s:%s" % (release, src_package, old_version)
622+    new_vers_column = "oopses:Crash: %s:%s:%s" % (release, src_package, new_version)
650623    results = {}
651624
652625    try:
653-        # The first thing done is the reversing of the order that's why it
654-        # is column_start (get items <= date in reverse order)
655626        old_rows = (
656627            Counters.objects.filter(key=old_vers_column.encode(), column1__lte=date)
628+            .order_by("-column1")
657629            .limit(15)
658630            .all()
659631        )
660-        old_rows_sorted = sorted(old_rows, key=lambda x: x.column1, reverse=True)
661-        old_vers_data = {row.column1: row.value for row in old_rows_sorted}
632+        old_vers_data = {row.column1: row.value for row in old_rows}
662633    except DoesNotExist:
663634        old_vers_data = None
664635
665636    try:
666637        # this may be unnecessarily long since updates phase in ~3 days
667-        new_rows = Counters.objects.filter(key=new_vers_column.encode()).limit(15).all()
668-        new_rows_sorted = sorted(new_rows, key=lambda x: x.column1, reverse=True)
669-        new_vers_data = {row.column1: row.value for row in new_rows_sorted}
638+        new_rows = (
639+            Counters.objects.filter(key=new_vers_column.encode())
640+            .order_by("-column1")
641+            .limit(15)
642+            .all()
643+        )
644+        print(new_rows)
645+        new_vers_data = {row.column1: row.value for row in new_rows}
646+        print(new_vers_data)
670647    except DoesNotExist:
648+        print("New data does not exist")
671649        results["increase"] = False
672650        return results
673651
674652    if not new_vers_data:
653+        print("No new data")
675654        results["increase"] = False
676655        return results
677656
678657    if exclude_proposed:
679658        try:
680659            proposed_old_rows = (
681660                CountersForProposed.objects.filter(key=old_vers_column.encode(), column1__lte=date)
661+                .order_by("-column1")
682662                .limit(15)
683663                .all()
684664            )
685-            proposed_old_rows_sorted = sorted(
686-                proposed_old_rows, key=lambda x: x.column1, reverse=True
687-            )
688-            proposed_old_vers_data = {row.column1: row.value for row in proposed_old_rows_sorted}
665+            proposed_old_vers_data = {row.column1: row.value for row in proposed_old_rows}
689666        except DoesNotExist:
690667            proposed_old_vers_data = None
691668        try:
692669            proposed_new_rows = (
693-                CountersForProposed.objects.filter(key=new_vers_column.encode()).limit(15).all()
694-            )
695-            proposed_new_rows_sorted = sorted(
696-                proposed_new_rows, key=lambda x: x.column1, reverse=True
670+                CountersForProposed.objects.filter(key=new_vers_column.encode())
671+                .order_by("-column1")
672+                .limit(15)
673+                .all()
697674            )
698-            proposed_new_vers_data = {row.column1: row.value for row in proposed_new_rows_sorted}
675+            proposed_new_vers_data = {row.column1: row.value for row in proposed_new_rows}
699676        except DoesNotExist:
700677            proposed_new_vers_data = None
701678
679+    print(f"{proposed_old_vers_data=}")
680+    print(f"{proposed_new_vers_data=}")
681+    print(f"{old_vers_data=}")
682+    print(f"{new_vers_data=}")
702683    today = datetime.datetime.utcnow().strftime("%Y%m%d")
684+    print(today)
703685    try:
704686        today_crashes = new_vers_data[today]
705687    except KeyError:
706688        # no crashes today so not an increase
689+        print("No data for today")
707690        results["increase"] = False
708691        return results
709692
@@ -716,6 +699,7 @@ def get_package_crash_rate(
716699        today_crashes = today_crashes - today_proposed_crashes
717700        if today_crashes == 0:
718701            # no crashes today so not an increase
702+            print("No data for today outside -proposed")
719703            results["increase"] = False
720704            return results
721705
@@ -733,8 +717,11 @@ def get_package_crash_rate(
733717        return results
734718
735719    first_date = date
720+    print(f"{first_date=}")
736721    oldest_date = list(old_vers_data.keys())[-1]
722+    print(f"{oldest_date=}")
737723    dates = [x for x in _date_range_iterator(oldest_date, first_date)]
724+    print(f"{dates=}")
738725    previous_vers_crashes = []
739726    previous_days = len(dates[:-1])
740727    for day in dates[:-1]:
@@ -756,12 +743,15 @@ def get_package_crash_rate(
756743    results["increase"] = False
757744    # 2 crashes may be a fluke
758745    if today_crashes < 3:
746+        print("Less than 3 crashes today")
759747        return results
760748
761749    now = datetime.datetime.utcnow()
762750    hour = float(now.hour)
763751    minute = float(now.minute)
764752    mean_crashes = numpy.average(previous_vers_crashes)
753+    print(f"{mean_crashes=}")
754+    print(f"{previous_vers_crashes=}")
765755    standard_crashes = (mean_crashes + numpy.std(previous_vers_crashes)).round()
766756    # if an update isn't fully phased then the previous package version will
767757    # generally have more crashes than the phasing one so multiple the quanity
@@ -786,6 +776,10 @@ def get_package_crash_rate(
786776        results["web_link"] = absolute_uri + web_link
787777        results["previous_period_in_days"] = previous_days
788778        results["previous_average"] = standard_crashes
779+        print("Difference less than 1")
780+        print(f"{difference=}")
781+        print(f"{today_crashes=}")
782+        print(f"{standard_crashes=}")
789783        return results
790784
791785