forked from enviPath/enviPy
[Feature] Basic logging of Jobs, Model Evaluation (#169)
Co-authored-by: Tim Lorsbach <tim@lorsba.ch> Reviewed-on: enviPath/enviPy#169
This commit is contained in:
@ -114,6 +114,6 @@ class Command(BaseCommand):
|
||||
print(f"Training {model_name}")
|
||||
model.build_model()
|
||||
print(f"Evaluating {model_name}")
|
||||
model.evaluate_model()
|
||||
model.evaluate_model(False, eval_packages=eval_packages)
|
||||
print(f"Saving {model_name}")
|
||||
model.save()
|
||||
|
||||
38
epdb/management/commands/update_job_logs.py
Normal file
38
epdb/management/commands/update_job_logs.py
Normal file
@ -0,0 +1,38 @@
|
||||
from datetime import date, timedelta
|
||||
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.db import transaction
|
||||
|
||||
from epdb.models import JobLog
|
||||
|
||||
|
||||
class Command(BaseCommand):
    """Sync JobLog rows with their Celery task state and optionally purge old logs."""

    def add_arguments(self, parser):
        parser.add_argument(
            "--cleanup",
            type=int,
            default=None,
            help="Remove all logs older than this number of days. Default is None, which does not remove any logs.",
        )

    @transaction.atomic
    def handle(self, *args, **options):
        # Hoisted from the bottom of the method: keep all of handle()'s imports
        # in one obvious place.
        from django.db.models import Count

        if options["cleanup"] is not None:
            cleanup_dt = date.today() - timedelta(days=options["cleanup"])
            # delete() returns (total, per-model dict); printed for operator feedback.
            print(JobLog.objects.filter(created__lt=cleanup_dt).delete())

        logs = JobLog.objects.filter(status="INITIAL")
        print(f"Found {logs.count()} logs to update")

        # check_for_update() returns True when the log row was advanced to a
        # terminal Celery state.
        updated = sum(1 for log in logs if log.check_for_update())
        print(f"Updated {updated} logs")

        # Small status histogram as a closing summary.
        qs = JobLog.objects.values("status").annotate(total=Count("status"))
        for row in qs:
            print(row["status"], row["total"])
|
||||
@ -2225,10 +2225,18 @@ class PackageBasedModel(EPModel):
|
||||
self.model_status = self.BUILT_NOT_EVALUATED
|
||||
self.save()
|
||||
|
||||
def evaluate_model(self):
|
||||
def evaluate_model(self, multigen: bool, eval_packages: List["Package"] = None):
|
||||
if self.model_status != self.BUILT_NOT_EVALUATED:
|
||||
raise ValueError(f"Can't evaluate a model in state {self.model_status}!")
|
||||
|
||||
if multigen:
|
||||
self.multigen_eval = multigen
|
||||
self.save()
|
||||
|
||||
if eval_packages is not None:
|
||||
for p in eval_packages:
|
||||
self.eval_packages.add(p)
|
||||
|
||||
self.model_status = self.EVALUATING
|
||||
self.save()
|
||||
|
||||
@ -2525,7 +2533,6 @@ class RuleBasedRelativeReasoning(PackageBasedModel):
|
||||
package: "Package",
|
||||
rule_packages: List["Package"],
|
||||
data_packages: List["Package"],
|
||||
eval_packages: List["Package"],
|
||||
threshold: float = 0.5,
|
||||
min_count: int = 10,
|
||||
max_count: int = 0,
|
||||
@ -2574,10 +2581,6 @@ class RuleBasedRelativeReasoning(PackageBasedModel):
|
||||
for p in rule_packages:
|
||||
rbrr.data_packages.add(p)
|
||||
|
||||
if eval_packages:
|
||||
for p in eval_packages:
|
||||
rbrr.eval_packages.add(p)
|
||||
|
||||
rbrr.save()
|
||||
|
||||
return rbrr
|
||||
@ -2632,7 +2635,6 @@ class MLRelativeReasoning(PackageBasedModel):
|
||||
package: "Package",
|
||||
rule_packages: List["Package"],
|
||||
data_packages: List["Package"],
|
||||
eval_packages: List["Package"],
|
||||
threshold: float = 0.5,
|
||||
name: "str" = None,
|
||||
description: str = None,
|
||||
@ -2672,10 +2674,6 @@ class MLRelativeReasoning(PackageBasedModel):
|
||||
for p in rule_packages:
|
||||
mlrr.data_packages.add(p)
|
||||
|
||||
if eval_packages:
|
||||
for p in eval_packages:
|
||||
mlrr.eval_packages.add(p)
|
||||
|
||||
if build_app_domain:
|
||||
ad = ApplicabilityDomain.create(
|
||||
mlrr,
|
||||
@ -2995,7 +2993,6 @@ class EnviFormer(PackageBasedModel):
|
||||
def create(
|
||||
package: "Package",
|
||||
data_packages: List["Package"],
|
||||
eval_packages: List["Package"],
|
||||
threshold: float = 0.5,
|
||||
name: "str" = None,
|
||||
description: str = None,
|
||||
@ -3028,10 +3025,6 @@ class EnviFormer(PackageBasedModel):
|
||||
for p in data_packages:
|
||||
mod.data_packages.add(p)
|
||||
|
||||
if eval_packages:
|
||||
for p in eval_packages:
|
||||
mod.eval_packages.add(p)
|
||||
|
||||
# if build_app_domain:
|
||||
# ad = ApplicabilityDomain.create(mod, app_domain_num_neighbours, app_domain_reliability_threshold,
|
||||
# app_domain_local_compatibility_threshold)
|
||||
@ -3144,10 +3137,18 @@ class EnviFormer(PackageBasedModel):
|
||||
args = {"clz": "EnviFormer"}
|
||||
return args
|
||||
|
||||
def evaluate_model(self):
|
||||
def evaluate_model(self, multigen: bool, eval_packages: List["Package"] = None):
|
||||
if self.model_status != self.BUILT_NOT_EVALUATED:
|
||||
raise ValueError(f"Can't evaluate a model in state {self.model_status}!")
|
||||
|
||||
if multigen:
|
||||
self.multigen_eval = multigen
|
||||
self.save()
|
||||
|
||||
if eval_packages is not None:
|
||||
for p in eval_packages:
|
||||
self.eval_packages.add(p)
|
||||
|
||||
self.model_status = self.EVALUATING
|
||||
self.save()
|
||||
|
||||
@ -3671,3 +3672,53 @@ class Setting(EnviPathModel):
|
||||
self.public = True
|
||||
self.global_default = True
|
||||
self.save()
|
||||
|
||||
|
||||
class JobLogStatus(models.TextChoices):
    # Lifecycle states for a JobLog row. The four non-INITIAL values mirror
    # the Celery task states that JobLog.check_for_update() treats as terminal.
    INITIAL = "INITIAL", "Initial"  # dispatched, not yet known to be finished
    SUCCESS = "SUCCESS", "Success"
    FAILURE = "FAILURE", "Failure"
    REVOKED = "REVOKED", "Revoked"
    IGNORED = "IGNORED", "Ignored"
|
||||
|
||||
|
||||
class JobLog(TimeStampedModel):
    """Record of a dispatched background (Celery) job, so users can inspect
    job status and results via the jobs view."""

    user = models.ForeignKey("epdb.User", models.CASCADE)
    task_id = models.UUIDField(unique=True)
    job_name = models.TextField(null=False, blank=False)
    status = models.CharField(
        max_length=20,
        choices=JobLogStatus.choices,
        default=JobLogStatus.INITIAL,
    )

    done_at = models.DateTimeField(null=True, blank=True, default=None)
    task_result = models.TextField(null=True, blank=True, default=None)

    def check_for_update(self):
        """Poll Celery for this task's current state and persist it if terminal.

        Returns:
            True when the row was updated (state moved to a terminal status),
            False otherwise.
        """
        async_res = self.get_result()
        new_status = async_res.state

        # Use the enum members instead of repeating the magic strings; the
        # TextChoices values compare/hash equal to the raw Celery state strings.
        terminal_states = {
            JobLogStatus.SUCCESS,
            JobLogStatus.FAILURE,
            JobLogStatus.REVOKED,
            JobLogStatus.IGNORED,
        }

        if new_status != self.status and new_status in terminal_states:
            self.status = new_status
            self.done_at = async_res.date_done

            if new_status == JobLogStatus.SUCCESS:
                # task_result is a TextField but Celery results may be arbitrary
                # objects — coerce explicitly instead of relying on implicit
                # conversion at save time.
                result = async_res.result
                self.task_result = str(result) if result is not None else None

            self.save()

            return True
        return False

    def get_result(self):
        """Return the Celery AsyncResult handle for this log's task id."""
        # Imported lazily so importing the models module does not require a
        # configured Celery app.
        from celery.result import AsyncResult

        return AsyncResult(str(self.task_id))
|
||||
|
||||
117
epdb/tasks.py
117
epdb/tasks.py
@ -1,10 +1,13 @@
|
||||
import logging
|
||||
from typing import Optional
|
||||
from celery.utils.functional import LRUCache
|
||||
from celery import shared_task
|
||||
from epdb.models import Pathway, Node, EPModel, Setting
|
||||
from epdb.logic import SPathway
|
||||
from datetime import datetime
|
||||
from typing import Callable, Optional
|
||||
from uuid import uuid4
|
||||
|
||||
from celery import shared_task
|
||||
from celery.utils.functional import LRUCache
|
||||
|
||||
from epdb.logic import SPathway
|
||||
from epdb.models import EPModel, JobLog, Node, Package, Pathway, Setting, User
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
ML_CACHE = LRUCache(3) # Cache the three most recent ML models to reduce load times.
|
||||
@ -16,6 +19,40 @@ def get_ml_model(model_pk: int):
|
||||
return ML_CACHE[model_pk]
|
||||
|
||||
|
||||
def dispatch_eager(user: "User", job: Callable, *args, **kwargs):
    """Run *job* synchronously and record a SUCCESS JobLog entry for it.

    Args:
        user: owner of the resulting JobLog row.
        job: any callable; invoked inline with *args/**kwargs.

    Returns:
        The job's return value. On failure the exception is logged and
        re-raised; no JobLog row is written.
    """
    try:
        result = job(*args, **kwargs)

        log = JobLog()
        log.user = user
        # Eagerly-run jobs have no Celery task, so fabricate a unique id.
        log.task_id = uuid4()
        log.job_name = job.__name__
        log.status = "SUCCESS"
        log.done_at = datetime.now()
        # Compare against None, not truthiness: falsy-but-valid results
        # (0, "", False) must still be recorded.
        log.task_result = str(result) if result is not None else None
        log.save()

        return result
    except Exception:
        # Bare raise preserves the original traceback.
        logger.exception("Eager job %s failed", getattr(job, "__name__", job))
        raise
|
||||
|
||||
|
||||
def dispatch(user: "User", job: Callable, *args, **kwargs):
    """Queue *job* on Celery and record an INITIAL JobLog entry for it.

    Args:
        user: owner of the resulting JobLog row.
        job: a Celery task (must expose ``.delay``).

    Returns:
        The AsyncResult's current ``result`` — almost always ``None`` right
        after dispatch. Kept for backward compatibility; current callers
        ignore the return value.
    """
    try:
        async_res = job.delay(*args, **kwargs)

        log = JobLog()
        log.user = user
        log.task_id = async_res.task_id
        log.job_name = job.__name__
        log.status = "INITIAL"
        log.save()

        return async_res.result
    except Exception:
        # Bare raise preserves the original traceback.
        logger.exception("Dispatch of %s failed", getattr(job, "__name__", job))
        raise
|
||||
|
||||
|
||||
@shared_task(queue="background")
|
||||
def mul(a, b):
|
||||
return a * b
|
||||
@ -33,17 +70,55 @@ def send_registration_mail(user_pk: int):
|
||||
pass
|
||||
|
||||
|
||||
@shared_task(bind=True, queue="model")
def build_model(self, model_pk: int):
    """Build dataset and model for the EPModel *model_pk*.

    Progress is mirrored into the JobLog row matching this task's id, when
    one exists. Returns the model's URL.
    """
    mod = EPModel.objects.get(id=model_pk)

    # update() on an empty queryset is a no-op, so the previous
    # .exists() pre-checks were redundant extra queries.
    job_logs = JobLog.objects.filter(task_id=self.request.id)
    # NOTE(review): "RUNNING" is not a JobLogStatus choice — confirm whether
    # it should be added to the enum.
    job_logs.update(status="RUNNING", task_result=mod.url)

    try:
        mod.build_dataset()
        mod.build_model()
    except Exception:
        # "FAILURE" matches JobLogStatus.FAILURE and the terminal states in
        # JobLog.check_for_update(); the previous "FAILED" was not a valid
        # status value anywhere else in the codebase.
        job_logs.update(status="FAILURE", task_result=mod.url)
        raise

    job_logs.update(status="SUCCESS", task_result=mod.url)

    return mod.url
|
||||
|
||||
|
||||
@shared_task(bind=True, queue="model")
def evaluate_model(self, model_pk: int, multigen: bool, package_pks: Optional[list] = None):
    """Evaluate the EPModel *model_pk*.

    Args:
        multigen: run a multi-generation evaluation when True.
        package_pks: optional primary keys of extra evaluation packages.

    Returns the model's URL; JobLog progress is mirrored as in build_model.
    """
    packages = None

    if package_pks:
        packages = Package.objects.filter(pk__in=package_pks)

    mod = EPModel.objects.get(id=model_pk)

    # update() on an empty queryset is a no-op — no .exists() pre-check needed.
    job_logs = JobLog.objects.filter(task_id=self.request.id)
    # NOTE(review): "RUNNING" is not a JobLogStatus choice — confirm.
    job_logs.update(status="RUNNING", task_result=mod.url)

    try:
        mod.evaluate_model(multigen, eval_packages=packages)
    except Exception:
        # "FAILURE" matches JobLogStatus.FAILURE; the previous "FAILED" was
        # not a valid status value anywhere else in the codebase.
        job_logs.update(status="FAILURE", task_result=mod.url)
        raise

    job_logs.update(status="SUCCESS", task_result=mod.url)

    return mod.url
|
||||
|
||||
|
||||
@shared_task(queue="model")
|
||||
@ -52,9 +127,13 @@ def retrain(model_pk: int):
|
||||
mod.retrain()
|
||||
|
||||
|
||||
@shared_task(queue="predict")
|
||||
@shared_task(bind=True, queue="predict")
|
||||
def predict(
|
||||
pw_pk: int, pred_setting_pk: int, limit: Optional[int] = None, node_pk: Optional[int] = None
|
||||
self,
|
||||
pw_pk: int,
|
||||
pred_setting_pk: int,
|
||||
limit: Optional[int] = None,
|
||||
node_pk: Optional[int] = None,
|
||||
) -> Pathway:
|
||||
pw = Pathway.objects.get(id=pw_pk)
|
||||
setting = Setting.objects.get(id=pred_setting_pk)
|
||||
@ -65,6 +144,9 @@ def predict(
|
||||
pw.kv.update(**{"status": "running"})
|
||||
pw.save()
|
||||
|
||||
if JobLog.objects.filter(task_id=self.request.id).exists():
|
||||
JobLog.objects.filter(task_id=self.request.id).update(status="RUNNING", task_result=pw.url)
|
||||
|
||||
try:
|
||||
# regular prediction
|
||||
if limit is not None:
|
||||
@ -89,7 +171,18 @@ def predict(
|
||||
except Exception as e:
|
||||
pw.kv.update({"status": "failed"})
|
||||
pw.save()
|
||||
|
||||
if JobLog.objects.filter(task_id=self.request.id).exists():
|
||||
JobLog.objects.filter(task_id=self.request.id).update(
|
||||
status="FAILED", task_result=pw.url
|
||||
)
|
||||
|
||||
raise e
|
||||
|
||||
pw.kv.update(**{"status": "completed"})
|
||||
pw.save()
|
||||
|
||||
if JobLog.objects.filter(task_id=self.request.id).exists():
|
||||
JobLog.objects.filter(task_id=self.request.id).update(status="SUCCESS", task_result=pw.url)
|
||||
|
||||
return pw.url
|
||||
|
||||
@ -1,8 +1,21 @@
|
||||
from django import template
|
||||
from pydantic import AnyHttpUrl, ValidationError
|
||||
from pydantic.type_adapter import TypeAdapter
|
||||
|
||||
register = template.Library()
|
||||
|
||||
url_adapter = TypeAdapter(AnyHttpUrl)
|
||||
|
||||
|
||||
@register.filter
def classname(obj):
    """Template filter: the name of *obj*'s class (e.g. for CSS hooks)."""
    cls = obj.__class__
    return cls.__name__
|
||||
|
||||
|
||||
@register.filter
def is_url(value):
    """Template filter: True when *value* validates as an http(s) URL."""
    try:
        url_adapter.validate_python(value)
    except ValidationError:
        return False
    return True
|
||||
|
||||
@ -190,6 +190,7 @@ urlpatterns = [
|
||||
re_path(r"^indigo/dearomatize$", v.dearomatize, name="indigo_dearomatize"),
|
||||
re_path(r"^indigo/layout$", v.layout, name="indigo_layout"),
|
||||
re_path(r"^depict$", v.depict, name="depict"),
|
||||
re_path(r"^jobs", v.jobs, name="jobs"),
|
||||
# OAuth Stuff
|
||||
path("o/userinfo/", v.userinfo, name="oauth_userinfo"),
|
||||
]
|
||||
|
||||
119
epdb/views.py
119
epdb/views.py
@ -47,6 +47,7 @@ from .models import (
|
||||
ExternalDatabase,
|
||||
ExternalIdentifier,
|
||||
EnzymeLink,
|
||||
JobLog,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@ -754,8 +755,8 @@ def package_models(request, package_uuid):
|
||||
context["unreviewed_objects"] = unreviewed_model_qs
|
||||
|
||||
context["model_types"] = {
|
||||
"ML Relative Reasoning": "ml-relative-reasoning",
|
||||
"Rule Based Relative Reasoning": "rule-based-relative-reasoning",
|
||||
"ML Relative Reasoning": "mlrr",
|
||||
"Rule Based Relative Reasoning": "rbrr",
|
||||
}
|
||||
|
||||
if s.FLAGS.get("ENVIFORMER", False):
|
||||
@ -775,48 +776,40 @@ def package_models(request, package_uuid):
|
||||
|
||||
model_type = request.POST.get("model-type")
|
||||
|
||||
if model_type == "enviformer":
|
||||
threshold = float(request.POST.get(f"{model_type}-threshold", 0.5))
|
||||
|
||||
mod = EnviFormer.create(current_package, name, description, threshold)
|
||||
|
||||
elif model_type == "ml-relative-reasoning" or model_type == "rule-based-relative-reasoning":
|
||||
# Generic fields for ML and Rule Based
|
||||
rule_packages = request.POST.getlist("package-based-relative-reasoning-rule-packages")
|
||||
data_packages = request.POST.getlist("package-based-relative-reasoning-data-packages")
|
||||
eval_packages = request.POST.getlist(
|
||||
"package-based-relative-reasoning-evaluation-packages", []
|
||||
)
|
||||
rule_packages = request.POST.getlist("model-rule-packages")
|
||||
data_packages = request.POST.getlist("model-data-packages")
|
||||
|
||||
# Generic params
|
||||
params = {
|
||||
"package": current_package,
|
||||
"name": name,
|
||||
"description": description,
|
||||
"rule_packages": [
|
||||
PackageManager.get_package_by_url(current_user, p) for p in rule_packages
|
||||
],
|
||||
"data_packages": [
|
||||
PackageManager.get_package_by_url(current_user, p) for p in data_packages
|
||||
],
|
||||
"eval_packages": [
|
||||
PackageManager.get_package_by_url(current_user, p) for p in eval_packages
|
||||
],
|
||||
}
|
||||
|
||||
if model_type == "ml-relative-reasoning":
|
||||
if model_type == "enviformer":
|
||||
threshold = float(request.POST.get("model-threshold", 0.5))
|
||||
params["threshold"] = threshold
|
||||
|
||||
mod = EnviFormer.create(**params)
|
||||
elif model_type == "mlrr":
|
||||
# ML Specific
|
||||
threshold = float(request.POST.get(f"{model_type}-threshold", 0.5))
|
||||
threshold = float(request.POST.get("model-threshold", 0.5))
|
||||
# TODO handle additional fingerprinter
|
||||
# fingerprinter = request.POST.get(f"{model_type}-fingerprinter")
|
||||
# fingerprinter = request.POST.get("model-fingerprinter")
|
||||
|
||||
params["rule_packages"] = [
|
||||
PackageManager.get_package_by_url(current_user, p) for p in rule_packages
|
||||
]
|
||||
|
||||
# App Domain related parameters
|
||||
build_ad = request.POST.get("build-app-domain", False) == "on"
|
||||
num_neighbors = request.POST.get("num-neighbors", 5)
|
||||
reliability_threshold = request.POST.get("reliability-threshold", 0.5)
|
||||
local_compatibility_threshold = request.POST.get(
|
||||
"local-compatibility-threshold", 0.5
|
||||
)
|
||||
local_compatibility_threshold = request.POST.get("local-compatibility-threshold", 0.5)
|
||||
|
||||
params["threshold"] = threshold
|
||||
# params['fingerprinter'] = fingerprinter
|
||||
@ -826,18 +819,24 @@ def package_models(request, package_uuid):
|
||||
params["app_domain_local_compatibility_threshold"] = local_compatibility_threshold
|
||||
|
||||
mod = MLRelativeReasoning.create(**params)
|
||||
else:
|
||||
elif model_type == "rbrr":
|
||||
params["rule_packages"] = [
|
||||
PackageManager.get_package_by_url(current_user, p) for p in rule_packages
|
||||
]
|
||||
|
||||
mod = RuleBasedRelativeReasoning.create(**params)
|
||||
|
||||
from .tasks import build_model
|
||||
|
||||
build_model.delay(mod.pk)
|
||||
elif s.FLAGS.get("PLUGINS", False) and model_type in s.CLASSIFIER_PLUGINS.values():
|
||||
pass
|
||||
else:
|
||||
return error(
|
||||
request, "Invalid model type.", f'Model type "{model_type}" is not supported."'
|
||||
)
|
||||
return redirect(mod.url)
|
||||
|
||||
from .tasks import dispatch, build_model
|
||||
|
||||
dispatch(current_user, build_model, mod.pk)
|
||||
|
||||
return redirect(mod.url)
|
||||
else:
|
||||
return HttpResponseNotAllowed(["GET", "POST"])
|
||||
|
||||
@ -865,6 +864,10 @@ def package_model(request, package_uuid, model_uuid):
|
||||
return JsonResponse({"error": f'"{smiles}" is not a valid SMILES'}, status=400)
|
||||
|
||||
if classify:
|
||||
from epdb.tasks import dispatch_eager, predict_simple
|
||||
|
||||
res = dispatch_eager(current_user, predict_simple, current_model.pk, stand_smiles)
|
||||
|
||||
pred_res = current_model.predict(stand_smiles)
|
||||
res = []
|
||||
|
||||
@ -909,9 +912,25 @@ def package_model(request, package_uuid, model_uuid):
|
||||
current_model.delete()
|
||||
return redirect(current_package.url + "/model")
|
||||
elif hidden == "evaluate":
|
||||
from .tasks import evaluate_model
|
||||
from .tasks import dispatch, evaluate_model
|
||||
|
||||
eval_type = request.POST.get("model-evaluation-type")
|
||||
|
||||
if eval_type not in ["sg", "mg"]:
|
||||
return error(
|
||||
request,
|
||||
"Invalid evaluation type",
|
||||
f'Evaluation type "{eval_type}" is not supported. Only "sg" and "mg" are supported.',
|
||||
)
|
||||
|
||||
multigen = eval_type == "mg"
|
||||
|
||||
eval_packages = request.POST.getlist("model-evaluation-packages")
|
||||
eval_package_ids = [
|
||||
PackageManager.get_package_by_url(current_user, p).id for p in eval_packages
|
||||
]
|
||||
dispatch(current_user, evaluate_model, current_model.pk, multigen, eval_package_ids)
|
||||
|
||||
evaluate_model.delay(current_model.pk)
|
||||
return redirect(current_model.url)
|
||||
else:
|
||||
return HttpResponseBadRequest()
|
||||
@ -1809,9 +1828,9 @@ def package_pathways(request, package_uuid):
|
||||
pw.setting = prediction_setting
|
||||
pw.save()
|
||||
|
||||
from .tasks import predict
|
||||
from .tasks import dispatch, predict
|
||||
|
||||
predict.delay(pw.pk, prediction_setting.pk, limit=limit)
|
||||
dispatch(current_user, predict, pw.pk, prediction_setting.pk, limit=limit)
|
||||
|
||||
return redirect(pw.url)
|
||||
|
||||
@ -1930,10 +1949,16 @@ def package_pathway(request, package_uuid, pathway_uuid):
|
||||
if node_url:
|
||||
n = current_pathway.get_node(node_url)
|
||||
|
||||
from .tasks import predict
|
||||
from .tasks import dispatch, predict
|
||||
|
||||
dispatch(
|
||||
current_user,
|
||||
predict,
|
||||
current_pathway.pk,
|
||||
current_pathway.prediction_setting.pk,
|
||||
node_pk=n.pk,
|
||||
)
|
||||
|
||||
# Dont delay?
|
||||
predict(current_pathway.pk, current_pathway.setting.pk, node_pk=n.pk)
|
||||
return JsonResponse({"success": current_pathway.url})
|
||||
|
||||
return HttpResponseBadRequest()
|
||||
@ -2705,6 +2730,24 @@ def setting(request, setting_uuid):
|
||||
pass
|
||||
|
||||
|
||||
def jobs(request):
    """Job-log overview page.

    Superusers see every JobLog; everyone else sees only their own, newest
    first.
    """
    current_user = _anonymous_or_real(request)
    context = get_base_context(request)

    if request.method == "GET":
        context["object_type"] = "joblog"
        context["breadcrumbs"] = [
            {"Home": s.SERVER_URL},
            {"Jobs": s.SERVER_URL + "/jobs"},
        ]
        if current_user.is_superuser:
            context["jobs"] = JobLog.objects.all().order_by("-created")
        else:
            context["jobs"] = JobLog.objects.filter(user=current_user).order_by("-created")

        return render(request, "collections/joblog.html", context)

    # Previously non-GET requests fell through returning None (a 500);
    # respond explicitly like the other views in this module.
    return HttpResponseNotAllowed(["GET"])
|
||||
|
||||
|
||||
###########
|
||||
# KETCHER #
|
||||
###########
|
||||
|
||||
71
templates/collections/joblog.html
Normal file
71
templates/collections/joblog.html
Normal file
@ -0,0 +1,71 @@
|
||||
{% extends "framework.html" %}
{% load static %}
{% load envipytags %}
{% block content %}

{# Job-log overview: renders the "jobs" queryset supplied by the jobs() view. #}
<div class="panel-group" id="reviewListAccordion">
    <div class="panel panel-default">
        <div class="panel-heading" id="headingPanel" style="font-size:2rem;height: 46px">
            Jobs
        </div>
        <div class="panel-body">
            <p>
                Job Logs Desc
            </p>

        </div>

        <div class="panel panel-default panel-heading list-group-item" style="background-color:silver">
            <h4 class="panel-title">
                {# NOTE(review): data-parent targets "#job-accordion" but the container id is "reviewListAccordion" — confirm the intended accordion parent. #}
                <a id="job-accordion-link" data-toggle="collapse" data-parent="#job-accordion" href="#jobs">
                    Jobs
                </a>
            </h4>
        </div>
        <div id="jobs"
             class="panel-collapse collapse in">
            <div class="panel-body list-group-item" id="job-content">
                <table class="table table-bordered table-hover">
                    <tr style="background-color: rgba(0, 0, 0, 0.08);">
                        <th scope="col">ID</th>
                        <th scope="col">Name</th>
                        <th scope="col">Status</th>
                        <th scope="col">Queued</th>
                        <th scope="col">Done</th>
                        <th scope="col">Result</th>
                    </tr>
                    <tbody>
                    {% for job in jobs %}
                        <tr>
                            <td>{{ job.task_id }}</td>
                            <td>{{ job.job_name }}</td>
                            <td>{{ job.status }}</td>
                            <td>{{ job.created }}</td>
                            <td>{{ job.done_at }}</td>
                            {# Result cell: link URL-shaped results, truncate long text results, mark missing ones. #}
                            {% if job.task_result and job.task_result|is_url == True %}
                                <td><a href="{{ job.task_result }}">Result</a></td>
                            {% elif job.task_result %}
                                <td>{{ job.task_result|slice:"40" }}...</td>
                            {% else %}
                                <td>Empty</td>
                            {% endif %}
                        </tr>
                    {% endfor %}
                    </tbody>
                </table>
            </div>
        </div>

        {# NOTE(review): leftover from the generic collection template — the jobs() view puts no "objects" in the context, so this loop presumably never renders; confirm before removing. #}
        <!-- Unreviewable objects such as User / Group / Setting -->
        <ul class='list-group'>
            {% for obj in objects %}
                {% if object_type == 'user' %}
                    <a class="list-group-item" href="{{ obj.url }}">{{ obj.username }}</a>
                {% else %}
                    <a class="list-group-item" href="{{ obj.url }}">{{ obj.name }}</a>
                {% endif %}
            {% endfor %}
        </ul>
    </div>
</div>
{% endblock content %}
|
||||
@ -18,13 +18,19 @@
|
||||
prediction. You just need to set a name and the packages
|
||||
you want the object to be based on. There are multiple types of models available.
|
||||
For additional information have a look at our
|
||||
<a target="_blank" href="https://wiki.envipath.org/index.php/relative-reasoning" role="button">wiki >></a>
|
||||
<a target="_blank" href="https://wiki.envipath.org/index.php/relative-reasoning" role="button">wiki
|
||||
>></a>
|
||||
</div>
|
||||
<!-- Name -->
|
||||
<label for="model-name">Name</label>
|
||||
<input id="model-name" name="model-name" class="form-control" placeholder="Name"/>
|
||||
|
||||
<!-- Description -->
|
||||
<label for="model-description">Description</label>
|
||||
<input id="model-description" name="model-description" class="form-control"
|
||||
placeholder="Description"/>
|
||||
|
||||
<!-- Model Type -->
|
||||
<label for="model-type">Model Type</label>
|
||||
<select id="model-type" name="model-type" class="form-control" data-width='100%'>
|
||||
<option disabled selected>Select Model Type</option>
|
||||
@ -32,12 +38,12 @@
|
||||
<option value="{{ v }}">{{ k }}</option>
|
||||
{% endfor %}
|
||||
</select>
|
||||
<!-- ML and Rule Based Based Form-->
|
||||
<div id="package-based-relative-reasoning-specific-form">
|
||||
|
||||
<!-- Rule Packages -->
|
||||
<label for="package-based-relative-reasoning-rule-packages">Rule Packages</label>
|
||||
<select id="package-based-relative-reasoning-rule-packages" name="package-based-relative-reasoning-rule-packages"
|
||||
data-actions-box='true' class="form-control" multiple data-width='100%'>
|
||||
<div id="rule-packages" class="ep-model-param mlrr rbrr">
|
||||
<label for="model-rule-packages">Rule Packages</label>
|
||||
<select id="model-rule-packages" name="model-rule-packages" data-actions-box='true'
|
||||
class="form-control" multiple data-width='100%'>
|
||||
<option disabled>Reviewed Packages</option>
|
||||
{% for obj in meta.readable_packages %}
|
||||
{% if obj.reviewed %}
|
||||
@ -52,10 +58,13 @@
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</select>
|
||||
</div>
|
||||
|
||||
<!-- Data Packages -->
|
||||
<label for="package-based-relative-reasoning-data-packages" >Data Packages</label>
|
||||
<select id="package-based-relative-reasoning-data-packages" name="package-based-relative-reasoning-data-packages"
|
||||
data-actions-box='true' class="form-control" multiple data-width='100%'>
|
||||
<div id="data-packages" class="ep-model-param mlrr rbrr enviformer">
|
||||
<label for="model-data-packages">Data Packages</label>
|
||||
<select id="model-data-packages" name="model-data-packages" data-actions-box='true'
|
||||
class="form-control" multiple data-width='100%'>
|
||||
<option disabled>Reviewed Packages</option>
|
||||
{% for obj in meta.readable_packages %}
|
||||
{% if obj.reviewed %}
|
||||
@ -70,32 +79,31 @@
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</select>
|
||||
</div>
|
||||
|
||||
<div id="ml-relative-reasoning-specific-form">
|
||||
<!-- Fingerprinter -->
|
||||
<label for="ml-relative-reasoning-fingerprinter">Fingerprinter</label>
|
||||
<select id="ml-relative-reasoning-fingerprinter" name="ml-relative-reasoning-fingerprinter"
|
||||
class="form-control">
|
||||
<div id="fingerprinter" class="ep-model-param mlrr">
|
||||
<label for="model-fingerprinter">Fingerprinter</label>
|
||||
<select id="model-fingerprinter" name="model-fingerprinter" data-actions-box='true'
|
||||
class="form-control" multiple data-width='100%'>
|
||||
<option value="MACCS" selected>MACCS Fingerprinter</option>
|
||||
</select>
|
||||
{% if meta.enabled_features.PLUGINS and additional_descriptors %}
|
||||
<!-- Property Plugins go here -->
|
||||
<label for="ml-relative-reasoning-additional-fingerprinter">Additional Fingerprinter /
|
||||
Descriptors</label>
|
||||
<select id="ml-relative-reasoning-additional-fingerprinter"
|
||||
name="ml-relative-reasoning-additional-fingerprinter" class="form-control">
|
||||
<option disabled selected>Select Additional Fingerprinter / Descriptor</option>
|
||||
{% for k, v in additional_descriptors.items %}
|
||||
<option value="{{ v }}">{{ k }}</option>
|
||||
{% endfor %}
|
||||
</select>
|
||||
{% endif %}
|
||||
|
||||
<label for="ml-relative-reasoning-threshold">Threshold</label>
|
||||
<input type="number" min="0" max="1" step="0.05" value="0.5"
|
||||
id="ml-relative-reasoning-threshold"
|
||||
name="ml-relative-reasoning-threshold" class="form-control">
|
||||
</select>
|
||||
</div>
|
||||
|
||||
<!-- Threshold -->
|
||||
<div id="threshold" class="ep-model-param mlrr enviformer">
|
||||
<label for="model-threshold">Threshold</label>
|
||||
<input type="number" min="0" max="1" step="0.05" value="0.5" id="model-threshold"
|
||||
name="model-threshold" class="form-control">
|
||||
</div>
|
||||
|
||||
<div id="appdomain" class="ep-model-param mlrr">
|
||||
{% if meta.enabled_features.APPLICABILITY_DOMAIN %}
|
||||
<!-- Build AD? -->
|
||||
<div class="checkbox">
|
||||
@ -107,11 +115,13 @@
|
||||
<div id="ad-params" style="display:none">
|
||||
<!-- Num Neighbors -->
|
||||
<label for="num-neighbors">Number of Neighbors</label>
|
||||
<input id="num-neighbors" name="num-neighbors" type="number" class="form-control" value="5"
|
||||
<input id="num-neighbors" name="num-neighbors" type="number" class="form-control"
|
||||
value="5"
|
||||
step="1" min="0" max="10">
|
||||
<!-- Local Compatibility -->
|
||||
<label for="local-compatibility-threshold">Local Compatibility Threshold</label>
|
||||
<input id="local-compatibility-threshold" name="local-compatibility-threshold" type="number"
|
||||
<input id="local-compatibility-threshold" name="local-compatibility-threshold"
|
||||
type="number"
|
||||
class="form-control" value="0.5" step="0.01" min="0" max="1">
|
||||
<!-- Reliability -->
|
||||
<label for="reliability-threshold">Reliability Threshold</label>
|
||||
@ -120,12 +130,6 @@
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
<!-- EnviFormer-->
|
||||
<div id="enviformer-specific-form">
|
||||
<label for="enviformer-threshold">Threshold</label>
|
||||
<input type="number" min="0" max="1" step="0.05" value="0.5" id="enviformer-threshold"
|
||||
name="enviformer-threshold" class="form-control">
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
@ -137,20 +141,23 @@
|
||||
</div>
|
||||
|
||||
<script>
|
||||
$(function() {
|
||||
$(function () {
|
||||
// Built in Model Types
|
||||
var nativeModelTypes = [
|
||||
"mlrr",
|
||||
"rbrr",
|
||||
"enviformer",
|
||||
]
|
||||
|
||||
// Initially hide all "specific" forms
|
||||
$("div[id$='-specific-form']").each( function() {
|
||||
$(".ep-model-param").each(function () {
|
||||
$(this).hide();
|
||||
});
|
||||
|
||||
$('#model-type').selectpicker();
|
||||
$("#ml-relative-reasoning-fingerprinter").selectpicker();
|
||||
$("#package-based-relative-reasoning-rule-packages").selectpicker();
|
||||
$("#package-based-relative-reasoning-data-packages").selectpicker();
|
||||
$("#package-based-relative-reasoning-evaluation-packages").selectpicker();
|
||||
if ($('#ml-relative-reasoning-additional-fingerprinter').length > 0) {
|
||||
$("#ml-relative-reasoning-additional-fingerprinter").selectpicker();
|
||||
}
|
||||
$("#model-fingerprinter").selectpicker();
|
||||
$("#model-rule-packages").selectpicker();
|
||||
$("#model-data-packages").selectpicker();
|
||||
|
||||
$("#build-app-domain").change(function () {
|
||||
if ($(this).is(":checked")) {
|
||||
@ -161,29 +168,20 @@ $(function() {
|
||||
});
|
||||
|
||||
// On change hide all and show only selected
|
||||
$("#model-type").change(function() {
|
||||
$("div[id$='-specific-form']").each( function() {
|
||||
$(this).hide();
|
||||
});
|
||||
val = $('option:selected', this).val();
|
||||
|
||||
if (val === 'ml-relative-reasoning' || val === 'rule-based-relative-reasoning') {
|
||||
$("#package-based-relative-reasoning-specific-form").show();
|
||||
if (val === 'ml-relative-reasoning') {
|
||||
$("#ml-relative-reasoning-specific-form").show();
|
||||
}
|
||||
$("#model-type").change(function () {
|
||||
$('.ep-model-param').hide();
|
||||
var modelType = $('#model-type').val();
|
||||
if (nativeModelTypes.indexOf(modelType) !== -1) {
|
||||
$('.' + modelType).show();
|
||||
} else {
|
||||
$("#" + val + "-specific-form").show();
|
||||
// do nothing
|
||||
}
|
||||
});
|
||||
|
||||
$('#new_model_modal_form_submit').on('click', function(e){
|
||||
$('#new_model_modal_form_submit').on('click', function (e) {
|
||||
e.preventDefault();
|
||||
$('#new_model_form').submit();
|
||||
});
|
||||
|
||||
});
|
||||
|
||||
|
||||
|
||||
});
|
||||
</script>
|
||||
|
||||
@ -17,10 +17,10 @@
|
||||
For evaluation, you need to select the packages you want to use.
|
||||
While the model is evaluating, you can use the model for predictions.
|
||||
</div>
|
||||
<!-- Evaluation -->
|
||||
<label for="relative-reasoning-evaluation-packages">Evaluation Packages</label>
|
||||
<select id="relative-reasoning-evaluation-packages" name=relative-reasoning-evaluation-packages"
|
||||
data-actions-box='true' class="form-control" multiple data-width='100%'>
|
||||
<!-- Evaluation Packages -->
|
||||
<label for="model-evaluation-packages">Evaluation Packages</label>
|
||||
<select id="model-evaluation-packages" name="model-evaluation-packages" data-actions-box='true'
|
||||
class="form-control" multiple data-width='100%'>
|
||||
<option disabled>Reviewed Packages</option>
|
||||
{% for obj in meta.readable_packages %}
|
||||
{% if obj.reviewed %}
|
||||
@ -35,6 +35,15 @@
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</select>
|
||||
|
||||
<!-- Eval Type -->
|
||||
<label for="model-evaluation-type">Evaluation Type</label>
|
||||
<select id="model-evaluation-type" name="model-evaluation-type" class="form-control">
|
||||
<option disabled selected>Select evaluation type</option>
|
||||
<option value="sg">Single Generation</option>
|
||||
<option value="mg">Multiple Generations</option>
|
||||
</select>
|
||||
|
||||
<input type="hidden" name="hidden" value="evaluate">
|
||||
</form>
|
||||
</div>
|
||||
@ -50,7 +59,7 @@
|
||||
|
||||
$(function () {
|
||||
|
||||
$("#relative-reasoning-evaluation-packages").selectpicker();
|
||||
$("#model-evaluation-packages").selectpicker();
|
||||
|
||||
$('#evaluate_model_form_submit').on('click', function (e) {
|
||||
e.preventDefault();
|
||||
|
||||
@ -117,7 +117,7 @@
|
||||
<!-- End Predict Panel -->
|
||||
{% endif %}
|
||||
|
||||
{% if model.app_domain %}
|
||||
{% if model.ready_for_prediction and model.app_domain %}
|
||||
<!-- App Domain -->
|
||||
<div class="panel panel-default panel-heading list-group-item" style="background-color:silver">
|
||||
<h4 class="panel-title">
|
||||
|
||||
@ -3,7 +3,7 @@ from datetime import datetime
|
||||
from tempfile import TemporaryDirectory
|
||||
from django.test import TestCase, tag
|
||||
from epdb.logic import PackageManager
|
||||
from epdb.models import User, EnviFormer, Package, Setting, Pathway
|
||||
from epdb.models import User, EnviFormer, Package, Setting
|
||||
from epdb.tasks import predict_simple, predict
|
||||
|
||||
|
||||
@ -48,9 +48,7 @@ class EnviFormerTest(TestCase):
|
||||
|
||||
mod.build_dataset()
|
||||
mod.build_model()
|
||||
mod.multigen_eval = True
|
||||
mod.save()
|
||||
mod.evaluate_model()
|
||||
mod.evaluate_model(True, eval_packages_objs)
|
||||
|
||||
mod.predict("CCN(CC)C(=O)C1=CC(=CC=C1)C")
|
||||
|
||||
@ -75,11 +73,15 @@ class EnviFormerTest(TestCase):
|
||||
|
||||
# Test pathway prediction
|
||||
times = [measure_predict(mods[1], self.BBD_SUBSET.pathways[0].pk) for _ in range(5)]
|
||||
print(f"First pathway prediction took {times[0]} seconds, subsequent ones took {times[1:]}")
|
||||
print(
|
||||
f"First pathway prediction took {times[0]} seconds, subsequent ones took {times[1:]}"
|
||||
)
|
||||
|
||||
# Test eviction by performing three prediction with every model, twice.
|
||||
times = defaultdict(list)
|
||||
for _ in range(2): # Eviction should cause the second iteration here to have to reload the models
|
||||
for _ in range(
|
||||
2
|
||||
): # Eviction should cause the second iteration here to have to reload the models
|
||||
for mod in mods:
|
||||
for _ in range(3):
|
||||
times[mod.pk].append(measure_predict(mod))
|
||||
|
||||
@ -30,7 +30,6 @@ class ModelTest(TestCase):
|
||||
self.package,
|
||||
rule_package_objs,
|
||||
data_package_objs,
|
||||
eval_packages_objs,
|
||||
threshold=threshold,
|
||||
name="ECC - BBD - 0.5",
|
||||
description="Created MLRelativeReasoning in Testcase",
|
||||
@ -50,9 +49,7 @@ class ModelTest(TestCase):
|
||||
|
||||
mod.build_dataset()
|
||||
mod.build_model()
|
||||
mod.multigen_eval = True
|
||||
mod.save()
|
||||
mod.evaluate_model()
|
||||
mod.evaluate_model(True, eval_packages_objs)
|
||||
|
||||
results = mod.predict("CCN(CC)C(=O)C1=CC(=CC=C1)C")
|
||||
|
||||
|
||||
@ -6,7 +6,7 @@ from epdb.logic import UserManager
|
||||
from epdb.models import Package, User
|
||||
|
||||
|
||||
@override_settings(MODEL_DIR=s.FIXTURE_DIRS[0] / "models")
|
||||
@override_settings(MODEL_DIR=s.FIXTURE_DIRS[0] / "models", CELERY_TASK_ALWAYS_EAGER=True)
|
||||
class PathwayViewTest(TestCase):
|
||||
fixtures = ["test_fixtures_incl_model.jsonl.gz"]
|
||||
|
||||
|
||||
@ -6,7 +6,7 @@ from epdb.logic import UserManager, PackageManager
|
||||
from epdb.models import Pathway, Edge
|
||||
|
||||
|
||||
@override_settings(MODEL_DIR=s.FIXTURE_DIRS[0] / "models")
|
||||
@override_settings(MODEL_DIR=s.FIXTURE_DIRS[0] / "models", CELERY_TASK_ALWAYS_EAGER=True)
|
||||
class PathwayViewTest(TestCase):
|
||||
fixtures = ["test_fixtures_incl_model.jsonl.gz"]
|
||||
|
||||
|
||||
Reference in New Issue
Block a user