Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 16 additions & 0 deletions .github/workflows/run-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,22 @@ jobs:
- name: Style Checks
run: poetry run flake8 tom_* --exclude=*/migrations/* --max-line-length=120

check_migrations:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: "3.12"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
python -m pip install "poetry>=2.0,<3.0"
poetry install
- name: Missing Migrations
run: |
poetry run python tom_jpl/tests/check_migrations.py

run_tests:
runs-on: ubuntu-latest
strategy:
Expand Down
18 changes: 18 additions & 0 deletions tom_jpl/apps.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,3 +14,21 @@ def data_services(self):
return [
{'class': f'{self.name}.jpl.ScoutDataService'},
]

def target_detail_tabs(self):
    """
    Integration point for adding tabs to the target detail page.

    Returns a list of tab descriptors, each a dict with:

    * ``partial`` -- template path of the target_detail_tab HTML partial
      rendered inside the tab,
    * ``label``   -- text shown on the tab, also used as its reference id,
    * ``context`` -- dot-separated path to the templatetag that returns a
      dict of extra context for the partial.
    """
    scout_tab = {
        'partial': f'{self.name}/partials/scoutdetails_partial.html',
        'label': 'Scout Details',
        'context': f'{self.name}.templatetags.scoutdetail_extras.tab_context',
    }
    return [scout_tab]
14 changes: 7 additions & 7 deletions tom_jpl/migrations/0001_initial.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,17 +18,17 @@ class Migration(migrations.Migration):
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('num_obs', models.IntegerField(blank=True, help_text='Number of observations', null=True)),
('neo_score', models.IntegerField(blank=True, help_text='NEO digest score (0..100)', null=True)),
('neo1km_score', models.IntegerField(blank=True, help_text='NEO >1km digest score (0..100)', null=True)),
('pha_score', models.IntegerField(blank=True, help_text='PHA digest score (0..100)', null=True)),
('ieo_score', models.IntegerField(blank=True, help_text='IEO digest score (0..100)', null=True)),
('geocentric_score', models.IntegerField(blank=True, help_text='Geocentric digest score (0..100)', null=True)),
('neo_score', models.IntegerField(blank=True, verbose_name='NEO Score', help_text='NEO digest score (0..100)', null=True)),
('neo1km_score', models.IntegerField(blank=True, verbose_name='NEO >1km Score', help_text='NEO >1km digest score (0..100)', null=True)),
('pha_score', models.IntegerField(blank=True, verbose_name='PHA Score', help_text='PHA digest score (0..100)', null=True)),
('ieo_score', models.IntegerField(blank=True, verbose_name='IEO Score', help_text='IEO digest score (0..100)', null=True)),
('geocentric_score', models.IntegerField(blank=True, verbose_name='Geocentric Score', help_text='Geocentric digest score (0..100)', null=True)),
('impact_rating', models.IntegerField(blank=True, choices=[(0, 'Negligible'), (1, 'Small'), (2, 'Modest'), (3, 'Moderate'), (4, 'Elevated')], help_text='Impact rating (0=negligible, 1=small, 2=modest, 3=moderate, 4=elevated)', null=True)),
('ca_dist', models.FloatField(blank=True, help_text='Close approach distance (lunar distances)', null=True)),
('arc', models.FloatField(blank=True, help_text='Arc length (days)', null=True)),
('rms', models.FloatField(blank=True, help_text='RMS of the residuals to the orbit fit (arcsec)', null=True)),
('uncertainty', models.FloatField(blank=True, help_text='1-sigma plane-of-sky positional uncertainty (arcmin)', null=True)),
('uncertainty_p1', models.FloatField(blank=True, help_text='1-sigma plane-of-sky positional uncertainty at +1 day (arcmin)', null=True)),
('uncertainty', models.FloatField(blank=True, verbose_name='Uncertainty', help_text='1-sigma plane-of-sky positional uncertainty (arcmin)', null=True)),
('uncertainty_p1', models.FloatField(blank=True, verbose_name='Uncertainty at +1 day', help_text='1-sigma plane-of-sky positional uncertainty at +1 day (arcmin)', null=True)),
('last_run', models.DateTimeField(blank=True, help_text='Last time the data was updated from Scout', null=True)),
('target', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='scout_detail', to='tom_targets.basetarget')),
],
Expand Down
23 changes: 16 additions & 7 deletions tom_jpl/models.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
from django.db import models
from django.forms.models import model_to_dict

from tom_targets.models import BaseTarget

Expand All @@ -12,20 +13,25 @@ class ScoutImpactRating(models.IntegerChoices):
ELEVATED = 4, 'Elevated'
target = models.OneToOneField(BaseTarget, on_delete=models.CASCADE, related_name='scout_detail')
num_obs = models.IntegerField(null=True, blank=True, help_text='Number of observations')
neo_score = models.IntegerField(null=True, blank=True, help_text='NEO digest score (0..100)')
neo1km_score = models.IntegerField(null=True, blank=True, help_text='NEO >1km digest score (0..100)')
pha_score = models.IntegerField(null=True, blank=True, help_text='PHA digest score (0..100)')
ieo_score = models.IntegerField(null=True, blank=True, help_text='IEO digest score (0..100)')
geocentric_score = models.IntegerField(null=True, blank=True, help_text='Geocentric digest score (0..100)')
neo_score = models.IntegerField(null=True, blank=True, verbose_name='NEO Score',
help_text='NEO digest score (0..100)')
neo1km_score = models.IntegerField(null=True, blank=True, verbose_name='NEO >1km Score',
help_text='NEO >1km digest score (0..100)')
pha_score = models.IntegerField(null=True, blank=True, verbose_name='PHA Score',
help_text='PHA digest score (0..100)')
ieo_score = models.IntegerField(null=True, blank=True, verbose_name='IEO Score',
help_text='IEO digest score (0..100)')
geocentric_score = models.IntegerField(null=True, blank=True, verbose_name='Geocentric Score',
help_text='Geocentric digest score (0..100)')
impact_rating = models.IntegerField(null=True, blank=True, choices=ScoutImpactRating.choices,
help_text='Impact rating (0=negligible, 1=small, 2=modest, 3=moderate, '
'4=elevated)')
ca_dist = models.FloatField(null=True, blank=True, help_text='Close approach distance (lunar distances)')
arc = models.FloatField(null=True, blank=True, help_text='Arc length (days)')
rms = models.FloatField(null=True, blank=True, help_text='RMS of the residuals to the orbit fit (arcsec)')
uncertainty = models.FloatField(null=True, blank=True,
uncertainty = models.FloatField(null=True, blank=True, verbose_name='Uncertainty',
help_text='1-sigma plane-of-sky positional uncertainty (arcmin)')
uncertainty_p1 = models.FloatField(null=True, blank=True,
uncertainty_p1 = models.FloatField(null=True, blank=True, verbose_name='Uncertainty at +1 day',
help_text='1-sigma plane-of-sky positional uncertainty at +1 day (arcmin)')
last_run = models.DateTimeField(null=True, blank=True, help_text='Last time the data was updated from Scout')

Expand All @@ -34,3 +40,6 @@ class Meta:

def __str__(self):
    """Human-readable label: the target's name plus its Scout impact rating."""
    return f'{self.target.name} (Impact rating: {self.impact_rating})'

def as_dict(self):
    """Return this record's concrete fields as a plain ``{name: value}`` dict."""
    field_names = [field.name for field in self._meta.fields]
    return model_to_dict(self, fields=field_names)
22 changes: 22 additions & 0 deletions tom_jpl/templates/tom_jpl/partials/scoutdetails_partial.html
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
{% load tom_common_extras %}
{% load scoutdetail_extras %}
{# Card listing every populated ScoutDetail field for the current target. #}
{# Expects `scoutdetail` in context; rows with None/'' values and the      #}
{# bookkeeping 'target'/'id' keys are skipped.                             #}
<div class="card">
  <div class="card-header">
    <div style="display: flex; justify-content: space-between">
      <h4 class="card-title">Scout Details</h4>
    </div>
  </div>
  <div class="card-body">
    <dl class="row">
      {% for key, value in scoutdetail.as_dict.items %}
        {% if value != None and value != '' and key != 'target' and key != 'id' %}
          {# Field help_text becomes a hover tooltip; verbose_name keeps its original capitalization. #}
          <dt class="col-sm-6" title="{% help_text scoutdetail key %}">{% verbose_name scoutdetail key %}</dt>
          <dd class="col-sm-6">{{ value|truncate_value_for_display }}</dd>
        {% endif %}
      {% empty %}
        No SCOUT details available.
      {% endfor %}
    </dl>
  </div>

</div>
35 changes: 35 additions & 0 deletions tom_jpl/templatetags/scoutdetail_extras.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
from django import template
from django.core.exceptions import FieldDoesNotExist

from tom_jpl.models import ScoutDetail

register = template.Library()


@register.inclusion_tag('tom_jpl/partials/scoutdetails_list.html', takes_context=True)
def tab_context(context):
    """
    Build the template context for the Scout Details target-detail tab.

    Looks up the ScoutDetail row attached to the ``target`` already present
    in the page context. When none exists, an empty dict is supplied instead
    so the partial silently renders nothing for it.

    NOTE(review): this inclusion tag renders ``scoutdetails_list.html`` while
    apps.py points the tab at ``scoutdetails_partial.html`` -- confirm the two
    template names are both intentional.
    """
    target = context.get('target')
    try:
        detail = ScoutDetail.objects.get(target=target)
    except ScoutDetail.DoesNotExist:
        detail = {}
    return {'scoutdetail': detail}


@register.simple_tag
def verbose_name(instance, field_name):
    """
    Return a model field's ``verbose_name`` without title-casing it.

    Unlike the version in tom_common_extras, capitalization in the verbose
    name (e.g. "NEO Score") is preserved. Falls back to ``field_name.title()``
    when *instance* is not a model or has no field by that name.
    """
    try:
        field = instance._meta.get_field(field_name)
    except (FieldDoesNotExist, AttributeError):
        return field_name.title()
    return field.verbose_name
10 changes: 10 additions & 0 deletions tom_jpl/tests/check_migrations.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
#!/usr/bin/env python
# CI helper: fail (exit non-zero) when model changes lack a migration.
#
# Bootstraps a minimal Django environment via boot_django, then runs
# ``makemigrations --check`` for this app only; with ``--check`` the
# command exits with a non-zero status instead of writing migration
# files, which fails the workflow job that invokes this script.

from django.core.management import call_command
from boot_django import boot_django, APP_NAME  # noqa


boot_django()  # settings must be configured before any management command runs
print(f'checking migrations for {APP_NAME}')
call_command('makemigrations', APP_NAME, '--check')
21 changes: 21 additions & 0 deletions tom_jpl/tests/factories.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import factory

from tom_targets.models import Target
from tom_jpl.models import ScoutDetail


class SiderealTargetFactory(factory.django.DjangoModelFactory):
Expand Down Expand Up @@ -33,3 +34,23 @@ class Meta:
ephemeris_period_err = factory.Faker('pyfloat')
ephemeris_epoch = factory.Faker('pyfloat')
ephemeris_epoch_err = factory.Faker('pyfloat')


class ScoutDetailFactory(factory.django.DjangoModelFactory):
    """Factory producing ScoutDetail rows populated with plausible random values."""

    class Meta:
        model = ScoutDetail

    # ScoutDetail has a OneToOneField to the target, so each build creates a
    # fresh non-sidereal target via the NonSiderealTargetFactory defined above.
    target = factory.SubFactory(NonSiderealTargetFactory)
    num_obs = factory.Faker('random_int', min=1, max=100)
    # Digest scores are declared on the model as 0..100.
    neo_score = factory.Faker('random_int', min=0, max=100)
    neo1km_score = factory.Faker('random_int', min=0, max=100)
    pha_score = factory.Faker('random_int', min=0, max=100)
    ieo_score = factory.Faker('random_int', min=0, max=100)
    geocentric_score = factory.Faker('random_int', min=0, max=100)
    # Matches the model's ScoutImpactRating choices (0=negligible .. 4=elevated).
    impact_rating = factory.Faker('random_int', min=0, max=4)
    ca_dist = factory.Faker('pyfloat', min_value=0, max_value=100, right_digits=2)
    arc = factory.Faker('pyfloat', min_value=0, max_value=30, right_digits=2)
    rms = factory.Faker('pyfloat', min_value=0, max_value=5, right_digits=2)
    uncertainty = factory.Faker('pyfloat', min_value=0, max_value=100, right_digits=2)
    uncertainty_p1 = factory.Faker('pyfloat', min_value=0, max_value=100, right_digits=2)
    last_run = factory.Faker('date_time_this_year')
30 changes: 28 additions & 2 deletions tom_jpl/tests/tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,12 @@
from dateutil.tz import tzutc

from django.test import SimpleTestCase, TestCase, RequestFactory
from django.template.loader import render_to_string
from django.contrib.auth.models import AnonymousUser
from unittest import mock

from tom_jpl.jpl import ScoutDataService, ScoutDetail
# from tom_dataservices.tests.factories import scout_resultsFactory
from tom_jpl.tests.factories import ScoutDetailFactory
from tom_targets.models import Target


Expand Down Expand Up @@ -643,6 +644,31 @@ def test_update_existing_target_from_query(self):
self.assertEqual(target.type, existing_target.type)
self.assertEqual(target.scheme, existing_target.scheme)
self.assertEqual(target.epoch_of_elements, existing_target.epoch_of_elements)
# Test for updated fields
self.assertNotAlmostEqual(target.eccentricity, existing_target.eccentricity, places=2)
self.assertNotAlmostEqual(target.mean_anomaly, existing_target.mean_anomaly, places=2)


class ScoutDetailsPartialTest(TestCase):
    """Rendering tests for the Scout Details target-detail partial."""

    def _render_partial(self, scoutdetail):
        """Render the partial with *scoutdetail* as its only context entry."""
        template_name = 'tom_jpl/partials/scoutdetails_partial.html'
        return render_to_string(template_name, context={'scoutdetail': scoutdetail})

    def test_excludes_none_values(self):
        """Fields whose value is None must be omitted from the output."""
        detail = ScoutDetailFactory(ieo_score=None)
        html = self._render_partial(detail)
        self.assertNotIn('IEO', html)

    def test_excludes_target_and_id(self):
        """The bookkeeping 'target' and 'id' fields never appear in the output."""
        html = self._render_partial(ScoutDetailFactory())
        self.assertNotIn('target', html)
        # Matching 'id"' rather than bare 'id' avoids false positives on e.g. card-id.
        self.assertNotIn('id"', html)

    def test_displays_neo_score(self):
        """A populated NEO score shows both its label and its value."""
        html = self._render_partial(ScoutDetailFactory(neo_score=78))
        self.assertIn('NEO', html)
        self.assertIn('78', html)
Loading