[Feature] Basic logging of Jobs, Model Evaluation (#169)

Co-authored-by: Tim Lorsbach <tim@lorsba.ch>
Reviewed-on: enviPath/enviPy#169
2025-10-27 22:34:05 +13:00
parent 551cfc7768
commit a952c08469
15 changed files with 556 additions and 240 deletions

@@ -3,7 +3,7 @@ from datetime import datetime
 from tempfile import TemporaryDirectory
 from django.test import TestCase, tag
 from epdb.logic import PackageManager
-from epdb.models import User, EnviFormer, Package, Setting, Pathway
+from epdb.models import User, EnviFormer, Package, Setting
 from epdb.tasks import predict_simple, predict
@@ -48,9 +48,7 @@ class EnviFormerTest(TestCase):
         mod.build_dataset()
         mod.build_model()
-        mod.multigen_eval = True
-        mod.save()
-        mod.evaluate_model()
+        mod.evaluate_model(True, eval_packages_objs)
         mod.predict("CCN(CC)C(=O)C1=CC(=CC=C1)C")
@@ -75,11 +73,15 @@ class EnviFormerTest(TestCase):
         # Test pathway prediction
         times = [measure_predict(mods[1], self.BBD_SUBSET.pathways[0].pk) for _ in range(5)]
-        print(f"First pathway prediction took {times[0]} seconds, subsequent ones took {times[1:]}")
+        print(
+            f"First pathway prediction took {times[0]} seconds, subsequent ones took {times[1:]}"
+        )
         # Test eviction by performing three predictions with every model, twice.
         times = defaultdict(list)
-        for _ in range(2):  # Eviction should cause the second iteration here to have to reload the models
+        for _ in range(
+            2
+        ):  # Eviction should cause the second iteration here to have to reload the models
             for mod in mods:
                 for _ in range(3):
                     times[mod.pk].append(measure_predict(mod))
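
measure_predict itself is outside this hunk; a plausible sketch, assuming it
simply wraps one prediction in a wall-clock timer (the helper body, the
predict_pathway entry point, and the reuse of the test's SMILES are
illustrative assumptions, not taken from the commit):

    import time

    def measure_predict(mod, pathway_pk=None):
        """Time one prediction in seconds; a pathway pk switches to pathway mode."""
        start = time.perf_counter()
        if pathway_pk is not None:
            mod.predict_pathway(pathway_pk)  # hypothetical pathway entry point
        else:
            mod.predict("CCN(CC)C(=O)C1=CC(=CC=C1)C")  # SMILES from the test above
        return time.perf_counter() - start

With that shape, the first call per model is expected to be slow (model load)
and later calls fast, and evicting cached models between the two outer
iterations should make the first call of the second pass slow again, which is
what the per-model timing dictionary is set up to reveal.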