forked from enviPath/enviPy
[Feature] Basic logging of Jobs, Model Evaluation (#169)
Co-authored-by: Tim Lorsbach <tim@lorsba.ch>
Reviewed-on: enviPath/enviPy#169
@@ -114,6 +114,6 @@ class Command(BaseCommand):
             print(f"Training {model_name}")
             model.build_model()
             print(f"Evaluating {model_name}")
-            model.evaluate_model()
+            model.evaluate_model(False, eval_packages=eval_packages)
             print(f"Saving {model_name}")
             model.save()
epdb/management/commands/update_job_logs.py (new file, 38 lines)
@@ -0,0 +1,38 @@
+from datetime import date, timedelta
+
+from django.core.management.base import BaseCommand
+from django.db import transaction
+
+from epdb.models import JobLog
+
+
+class Command(BaseCommand):
+    def add_arguments(self, parser):
+        parser.add_argument(
+            "--cleanup",
+            type=int,
+            default=None,
+            help="Remove all logs older than this number of days. Default is None, which does not remove any logs.",
+        )
+
+    @transaction.atomic
+    def handle(self, *args, **options):
+        if options["cleanup"] is not None:
+            cleanup_dt = date.today() - timedelta(days=options["cleanup"])
+            print(JobLog.objects.filter(created__lt=cleanup_dt).delete())
+
+        logs = JobLog.objects.filter(status="INITIAL")
+        print(f"Found {logs.count()} logs to update")
+        updated = 0
+        for log in logs:
+            res = log.check_for_update()
+            if res:
+                updated += 1
+
+        print(f"Updated {updated} logs")
+
+        from django.db.models import Count
+
+        qs = JobLog.objects.values("status").annotate(total=Count("status"))
+        for r in qs:
+            print(r["status"], r["total"])
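A minimal sketch of how the new command could be invoked from Python rather than the shell (the cleanup value is illustrative; it assumes a configured Django project with the epdb app installed):

    from django.core.management import call_command

    # Refresh all JobLog rows still in INITIAL state and drop logs older than 30 days.
    call_command("update_job_logs", cleanup=30)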
@@ -2225,10 +2225,18 @@ class PackageBasedModel(EPModel):
         self.model_status = self.BUILT_NOT_EVALUATED
         self.save()
 
-    def evaluate_model(self):
+    def evaluate_model(self, multigen: bool, eval_packages: List["Package"] = None):
         if self.model_status != self.BUILT_NOT_EVALUATED:
             raise ValueError(f"Can't evaluate a model in state {self.model_status}!")
 
+        if multigen:
+            self.multigen_eval = multigen
+            self.save()
+
+        if eval_packages is not None:
+            for p in eval_packages:
+                self.eval_packages.add(p)
+
         self.model_status = self.EVALUATING
         self.save()
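A hedged usage sketch of the new evaluate_model signature (the model lookup and package selection are illustrative and not part of the diff):

    from epdb.models import EPModel, Package

    mod = EPModel.objects.get(pk=1)              # assumes a model in BUILT_NOT_EVALUATED state
    eval_pkgs = list(Package.objects.all()[:2])  # hypothetical package selection
    mod.evaluate_model(True, eval_packages=eval_pkgs)  # multigen evaluation on the chosen packages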
@@ -2525,7 +2533,6 @@ class RuleBasedRelativeReasoning(PackageBasedModel):
         package: "Package",
         rule_packages: List["Package"],
         data_packages: List["Package"],
-        eval_packages: List["Package"],
         threshold: float = 0.5,
         min_count: int = 10,
         max_count: int = 0,
@@ -2574,10 +2581,6 @@ class RuleBasedRelativeReasoning(PackageBasedModel):
         for p in rule_packages:
             rbrr.data_packages.add(p)
 
-        if eval_packages:
-            for p in eval_packages:
-                rbrr.eval_packages.add(p)
-
         rbrr.save()
 
         return rbrr
@@ -2632,7 +2635,6 @@ class MLRelativeReasoning(PackageBasedModel):
         package: "Package",
         rule_packages: List["Package"],
         data_packages: List["Package"],
-        eval_packages: List["Package"],
         threshold: float = 0.5,
         name: "str" = None,
         description: str = None,
@@ -2672,10 +2674,6 @@ class MLRelativeReasoning(PackageBasedModel):
         for p in rule_packages:
             mlrr.data_packages.add(p)
 
-        if eval_packages:
-            for p in eval_packages:
-                mlrr.eval_packages.add(p)
-
         if build_app_domain:
             ad = ApplicabilityDomain.create(
                 mlrr,
@@ -2995,7 +2993,6 @@ class EnviFormer(PackageBasedModel):
     def create(
         package: "Package",
         data_packages: List["Package"],
-        eval_packages: List["Package"],
         threshold: float = 0.5,
         name: "str" = None,
         description: str = None,
@@ -3028,10 +3025,6 @@ class EnviFormer(PackageBasedModel):
         for p in data_packages:
             mod.data_packages.add(p)
 
-        if eval_packages:
-            for p in eval_packages:
-                mod.eval_packages.add(p)
-
         # if build_app_domain:
         #     ad = ApplicabilityDomain.create(mod, app_domain_num_neighbours, app_domain_reliability_threshold,
         #                                     app_domain_local_compatibility_threshold)
@@ -3144,10 +3137,18 @@ class EnviFormer(PackageBasedModel):
         args = {"clz": "EnviFormer"}
         return args
 
-    def evaluate_model(self):
+    def evaluate_model(self, multigen: bool, eval_packages: List["Package"] = None):
         if self.model_status != self.BUILT_NOT_EVALUATED:
             raise ValueError(f"Can't evaluate a model in state {self.model_status}!")
 
+        if multigen:
+            self.multigen_eval = multigen
+            self.save()
+
+        if eval_packages is not None:
+            for p in eval_packages:
+                self.eval_packages.add(p)
+
         self.model_status = self.EVALUATING
         self.save()
@@ -3671,3 +3672,53 @@ class Setting(EnviPathModel):
         self.public = True
         self.global_default = True
         self.save()
+
+
+class JobLogStatus(models.TextChoices):
+    INITIAL = "INITIAL", "Initial"
+    SUCCESS = "SUCCESS", "Success"
+    FAILURE = "FAILURE", "Failure"
+    REVOKED = "REVOKED", "Revoked"
+    IGNORED = "IGNORED", "Ignored"
+
+
+class JobLog(TimeStampedModel):
+    user = models.ForeignKey("epdb.User", models.CASCADE)
+    task_id = models.UUIDField(unique=True)
+    job_name = models.TextField(null=False, blank=False)
+    status = models.CharField(
+        max_length=20,
+        choices=JobLogStatus.choices,
+        default=JobLogStatus.INITIAL,
+    )
+
+    done_at = models.DateTimeField(null=True, blank=True, default=None)
+    task_result = models.TextField(null=True, blank=True, default=None)
+
+    def check_for_update(self):
+        async_res = self.get_result()
+        new_status = async_res.state
+
+        TERMINAL_STATES = [
+            "SUCCESS",
+            "FAILURE",
+            "REVOKED",
+            "IGNORED",
+        ]
+
+        if new_status != self.status and new_status in TERMINAL_STATES:
+            self.status = new_status
+            self.done_at = async_res.date_done
+
+            if new_status == "SUCCESS":
+                self.task_result = async_res.result
+
+            self.save()
+
+            return True
+        return False
+
+    def get_result(self):
+        from celery.result import AsyncResult
+
+        return AsyncResult(str(self.task_id))
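A short sketch of the polling flow check_for_update implements (the object retrieval is illustrative):

    from epdb.models import JobLog

    log = JobLog.objects.filter(status="INITIAL").first()
    if log is not None and log.check_for_update():
        # The Celery result backend reported a terminal state; the row now carries it.
        print(log.status, log.done_at, log.task_result)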
epdb/tasks.py (121 lines changed)
@@ -1,10 +1,13 @@
 import logging
-from typing import Optional
-from celery.utils.functional import LRUCache
-from celery import shared_task
-from epdb.models import Pathway, Node, EPModel, Setting
-from epdb.logic import SPathway
+from datetime import datetime
+from typing import Callable, Optional
+from uuid import uuid4
+
+from celery import shared_task
+from celery.utils.functional import LRUCache
+
+from epdb.logic import SPathway
+from epdb.models import EPModel, JobLog, Node, Package, Pathway, Setting, User
 
 logger = logging.getLogger(__name__)
 ML_CACHE = LRUCache(3)  # Cache the three most recent ML models to reduce load times.
@@ -16,6 +19,40 @@ def get_ml_model(model_pk: int):
     return ML_CACHE[model_pk]
 
 
+def dispatch_eager(user: "User", job: Callable, *args, **kwargs):
+    try:
+        x = job(*args, **kwargs)
+        log = JobLog()
+        log.user = user
+        log.task_id = uuid4()
+        log.job_name = job.__name__
+        log.status = "SUCCESS"
+        log.done_at = datetime.now()
+        log.task_result = str(x) if x else None
+        log.save()
+
+        return x
+    except Exception as e:
+        logger.exception(e)
+        raise e
+
+
+def dispatch(user: "User", job: Callable, *args, **kwargs):
+    try:
+        x = job.delay(*args, **kwargs)
+        log = JobLog()
+        log.user = user
+        log.task_id = x.task_id
+        log.job_name = job.__name__
+        log.status = "INITIAL"
+        log.save()
+
+        return x.result
+    except Exception as e:
+        logger.exception(e)
+        raise e
+
+
 @shared_task(queue="background")
 def mul(a, b):
     return a * b
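A hedged sketch of how the two wrappers are meant to be used (the user and model objects are illustrative):

    from epdb.tasks import dispatch, dispatch_eager, build_model, predict_simple

    # Queue a Celery task and record it as a JobLog row in state INITIAL.
    dispatch(current_user, build_model, mod.pk)

    # Run a job synchronously and record a completed JobLog row.
    result = dispatch_eager(current_user, predict_simple, mod.pk, "CCO")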
@@ -33,17 +70,55 @@ def send_registration_mail(user_pk: int):
     pass
 
 
-@shared_task(queue="model")
-def build_model(model_pk: int):
+@shared_task(bind=True, queue="model")
+def build_model(self, model_pk: int):
     mod = EPModel.objects.get(id=model_pk)
-    mod.build_dataset()
-    mod.build_model()
+
+    if JobLog.objects.filter(task_id=self.request.id).exists():
+        JobLog.objects.filter(task_id=self.request.id).update(status="RUNNING", task_result=mod.url)
+
+    try:
+        mod.build_dataset()
+        mod.build_model()
+    except Exception as e:
+        if JobLog.objects.filter(task_id=self.request.id).exists():
+            JobLog.objects.filter(task_id=self.request.id).update(
+                status="FAILED", task_result=mod.url
+            )
+
+        raise e
+
+    if JobLog.objects.filter(task_id=self.request.id).exists():
+        JobLog.objects.filter(task_id=self.request.id).update(status="SUCCESS", task_result=mod.url)
+
+    return mod.url
 
 
-@shared_task(queue="model")
-def evaluate_model(model_pk: int):
+@shared_task(bind=True, queue="model")
+def evaluate_model(self, model_pk: int, multigen: bool, package_pks: Optional[list] = None):
+    packages = None
+
+    if package_pks:
+        packages = Package.objects.filter(pk__in=package_pks)
+
     mod = EPModel.objects.get(id=model_pk)
-    mod.evaluate_model()
+
+    if JobLog.objects.filter(task_id=self.request.id).exists():
+        JobLog.objects.filter(task_id=self.request.id).update(status="RUNNING", task_result=mod.url)
+
+    try:
+        mod.evaluate_model(multigen, eval_packages=packages)
+    except Exception as e:
+        if JobLog.objects.filter(task_id=self.request.id).exists():
+            JobLog.objects.filter(task_id=self.request.id).update(
+                status="FAILED", task_result=mod.url
+            )
+
+        raise e
+
+    if JobLog.objects.filter(task_id=self.request.id).exists():
+        JobLog.objects.filter(task_id=self.request.id).update(status="SUCCESS", task_result=mod.url)
+
+    return mod.url
 
 
 @shared_task(queue="model")
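For completeness, a sketch of how the view layer queues this task through dispatch (the user, model, and package objects are illustrative):

    from epdb.tasks import dispatch, evaluate_model

    # multigen=True corresponds to the "mg" evaluation type in the form.
    dispatch(current_user, evaluate_model, current_model.pk, True, [p.pk for p in eval_pkgs])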
@@ -52,9 +127,13 @@ def retrain(model_pk: int):
     mod.retrain()
 
 
-@shared_task(queue="predict")
+@shared_task(bind=True, queue="predict")
 def predict(
-    pw_pk: int, pred_setting_pk: int, limit: Optional[int] = None, node_pk: Optional[int] = None
+    self,
+    pw_pk: int,
+    pred_setting_pk: int,
+    limit: Optional[int] = None,
+    node_pk: Optional[int] = None,
 ) -> Pathway:
     pw = Pathway.objects.get(id=pw_pk)
     setting = Setting.objects.get(id=pred_setting_pk)
@@ -65,6 +144,9 @@ def predict(
     pw.kv.update(**{"status": "running"})
     pw.save()
 
+    if JobLog.objects.filter(task_id=self.request.id).exists():
+        JobLog.objects.filter(task_id=self.request.id).update(status="RUNNING", task_result=pw.url)
+
     try:
         # regular prediction
         if limit is not None:
@@ -89,7 +171,18 @@ def predict(
     except Exception as e:
         pw.kv.update({"status": "failed"})
         pw.save()
+
+        if JobLog.objects.filter(task_id=self.request.id).exists():
+            JobLog.objects.filter(task_id=self.request.id).update(
+                status="FAILED", task_result=pw.url
+            )
+
         raise e
 
     pw.kv.update(**{"status": "completed"})
     pw.save()
+
+    if JobLog.objects.filter(task_id=self.request.id).exists():
+        JobLog.objects.filter(task_id=self.request.id).update(status="SUCCESS", task_result=pw.url)
+
+    return pw.url
@@ -1,8 +1,21 @@
 from django import template
+from pydantic import AnyHttpUrl, ValidationError
+from pydantic.type_adapter import TypeAdapter
+
 register = template.Library()
 
+url_adapter = TypeAdapter(AnyHttpUrl)
+
+
 @register.filter
 def classname(obj):
     return obj.__class__.__name__
+
+
+@register.filter
+def is_url(value):
+    try:
+        url_adapter.validate_python(value)
+        return True
+    except ValidationError:
+        return False
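A standalone sketch of the check the is_url filter performs (the URL values are illustrative):

    from pydantic import AnyHttpUrl, ValidationError
    from pydantic.type_adapter import TypeAdapter

    adapter = TypeAdapter(AnyHttpUrl)
    adapter.validate_python("https://envipath.org")  # passes, so is_url returns True
    # adapter.validate_python("no url here")         # raises ValidationError, so is_url returns False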
@@ -190,6 +190,7 @@ urlpatterns = [
     re_path(r"^indigo/dearomatize$", v.dearomatize, name="indigo_dearomatize"),
     re_path(r"^indigo/layout$", v.layout, name="indigo_layout"),
     re_path(r"^depict$", v.depict, name="depict"),
+    re_path(r"^jobs", v.jobs, name="jobs"),
     # OAuth Stuff
     path("o/userinfo/", v.userinfo, name="oauth_userinfo"),
 ]
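Assuming the urlconf is mounted at the site root, the named route resolves as in this sketch:

    from django.urls import reverse

    reverse("jobs")  # -> "/jobs"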
epdb/views.py (163 lines changed)
@@ -47,6 +47,7 @@ from .models import (
     ExternalDatabase,
     ExternalIdentifier,
     EnzymeLink,
+    JobLog,
 )
 
 logger = logging.getLogger(__name__)
@@ -754,8 +755,8 @@ def package_models(request, package_uuid):
         context["unreviewed_objects"] = unreviewed_model_qs
 
         context["model_types"] = {
-            "ML Relative Reasoning": "ml-relative-reasoning",
-            "Rule Based Relative Reasoning": "rule-based-relative-reasoning",
+            "ML Relative Reasoning": "mlrr",
+            "Rule Based Relative Reasoning": "rbrr",
         }
 
         if s.FLAGS.get("ENVIFORMER", False):
@@ -775,69 +776,67 @@ def package_models(request, package_uuid):
 
         model_type = request.POST.get("model-type")
 
+        # Generic fields for ML and Rule Based
+        rule_packages = request.POST.getlist("model-rule-packages")
+        data_packages = request.POST.getlist("model-data-packages")
+
+        # Generic params
+        params = {
+            "package": current_package,
+            "name": name,
+            "description": description,
+            "data_packages": [
+                PackageManager.get_package_by_url(current_user, p) for p in data_packages
+            ],
+        }
+
         if model_type == "enviformer":
-            threshold = float(request.POST.get(f"{model_type}-threshold", 0.5))
+            threshold = float(request.POST.get("model-threshold", 0.5))
+            params["threshold"] = threshold
 
-            mod = EnviFormer.create(current_package, name, description, threshold)
-        elif model_type == "ml-relative-reasoning" or model_type == "rule-based-relative-reasoning":
-            # Generic fields for ML and Rule Based
-            rule_packages = request.POST.getlist("package-based-relative-reasoning-rule-packages")
-            data_packages = request.POST.getlist("package-based-relative-reasoning-data-packages")
-            eval_packages = request.POST.getlist(
-                "package-based-relative-reasoning-evaluation-packages", []
-            )
-
-            # Generic params
-            params = {
-                "package": current_package,
-                "name": name,
-                "description": description,
-                "rule_packages": [
-                    PackageManager.get_package_by_url(current_user, p) for p in rule_packages
-                ],
-                "data_packages": [
-                    PackageManager.get_package_by_url(current_user, p) for p in data_packages
-                ],
-                "eval_packages": [
-                    PackageManager.get_package_by_url(current_user, p) for p in eval_packages
-                ],
-            }
-
-            if model_type == "ml-relative-reasoning":
-                # ML Specific
-                threshold = float(request.POST.get(f"{model_type}-threshold", 0.5))
-                # TODO handle additional fingerprinter
-                # fingerprinter = request.POST.get(f"{model_type}-fingerprinter")
-
-                # App Domain related parameters
-                build_ad = request.POST.get("build-app-domain", False) == "on"
-                num_neighbors = request.POST.get("num-neighbors", 5)
-                reliability_threshold = request.POST.get("reliability-threshold", 0.5)
-                local_compatibility_threshold = request.POST.get(
-                    "local-compatibility-threshold", 0.5
-                )
-
-                params["threshold"] = threshold
-                # params['fingerprinter'] = fingerprinter
-                params["build_app_domain"] = build_ad
-                params["app_domain_num_neighbours"] = num_neighbors
-                params["app_domain_reliability_threshold"] = reliability_threshold
-                params["app_domain_local_compatibility_threshold"] = local_compatibility_threshold
-
-                mod = MLRelativeReasoning.create(**params)
-            else:
-                mod = RuleBasedRelativeReasoning.create(**params)
-
-            from .tasks import build_model
-
-            build_model.delay(mod.pk)
+            mod = EnviFormer.create(**params)
+        elif model_type == "mlrr":
+            # ML Specific
+            threshold = float(request.POST.get("model-threshold", 0.5))
+            # TODO handle additional fingerprinter
+            # fingerprinter = request.POST.get("model-fingerprinter")
+
+            params["rule_packages"] = [
+                PackageManager.get_package_by_url(current_user, p) for p in rule_packages
+            ]
+
+            # App Domain related parameters
+            build_ad = request.POST.get("build-app-domain", False) == "on"
+            num_neighbors = request.POST.get("num-neighbors", 5)
+            reliability_threshold = request.POST.get("reliability-threshold", 0.5)
+            local_compatibility_threshold = request.POST.get("local-compatibility-threshold", 0.5)
+
+            params["threshold"] = threshold
+            # params['fingerprinter'] = fingerprinter
+            params["build_app_domain"] = build_ad
+            params["app_domain_num_neighbours"] = num_neighbors
+            params["app_domain_reliability_threshold"] = reliability_threshold
+            params["app_domain_local_compatibility_threshold"] = local_compatibility_threshold
+
+            mod = MLRelativeReasoning.create(**params)
+        elif model_type == "rbrr":
+            params["rule_packages"] = [
+                PackageManager.get_package_by_url(current_user, p) for p in rule_packages
+            ]
+
+            mod = RuleBasedRelativeReasoning.create(**params)
+        elif s.FLAGS.get("PLUGINS", False) and model_type in s.CLASSIFIER_PLUGINS.values():
+            pass
         else:
             return error(
                 request, "Invalid model type.", f'Model type "{model_type}" is not supported."'
             )
-        return redirect(mod.url)
+
+        from .tasks import dispatch, build_model
+
+        dispatch(current_user, build_model, mod.pk)
+
+        return redirect(mod.url)
     else:
         return HttpResponseNotAllowed(["GET", "POST"])
@@ -865,6 +864,10 @@ def package_model(request, package_uuid, model_uuid):
                 return JsonResponse({"error": f'"{smiles}" is not a valid SMILES'}, status=400)
 
             if classify:
+                from epdb.tasks import dispatch_eager, predict_simple
+
+                res = dispatch_eager(current_user, predict_simple, current_model.pk, stand_smiles)
+
                 pred_res = current_model.predict(stand_smiles)
                 res = []
@@ -909,9 +912,25 @@ def package_model(request, package_uuid, model_uuid):
             current_model.delete()
             return redirect(current_package.url + "/model")
         elif hidden == "evaluate":
-            from .tasks import evaluate_model
+            from .tasks import dispatch, evaluate_model
+
+            eval_type = request.POST.get("model-evaluation-type")
+
+            if eval_type not in ["sg", "mg"]:
+                return error(
+                    request,
+                    "Invalid evaluation type",
+                    f'Evaluation type "{eval_type}" is not supported. Only "sg" and "mg" are supported.',
+                )
+
+            multigen = eval_type == "mg"
+
+            eval_packages = request.POST.getlist("model-evaluation-packages")
+            eval_package_ids = [
+                PackageManager.get_package_by_url(current_user, p).id for p in eval_packages
+            ]
+            dispatch(current_user, evaluate_model, current_model.pk, multigen, eval_package_ids)
 
-            evaluate_model.delay(current_model.pk)
             return redirect(current_model.url)
         else:
             return HttpResponseBadRequest()
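A hedged sketch of the POST this branch expects, e.g. from the Django test client (the client, model URL, and package URL are illustrative):

    client.post(
        current_model.url,
        data={
            "hidden": "evaluate",
            "model-evaluation-type": "mg",               # "sg" or "mg"
            "model-evaluation-packages": [package.url],  # zero or more package URLs
        },
    )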
@@ -1809,9 +1828,9 @@ def package_pathways(request, package_uuid):
             pw.setting = prediction_setting
             pw.save()
 
-            from .tasks import predict
+            from .tasks import dispatch, predict
 
-            predict.delay(pw.pk, prediction_setting.pk, limit=limit)
+            dispatch(current_user, predict, pw.pk, prediction_setting.pk, limit=limit)
 
             return redirect(pw.url)
 
@@ -1930,10 +1949,16 @@ def package_pathway(request, package_uuid, pathway_uuid):
             if node_url:
                 n = current_pathway.get_node(node_url)
 
-                from .tasks import predict
+                from .tasks import dispatch, predict
 
-                # Dont delay?
-                predict(current_pathway.pk, current_pathway.setting.pk, node_pk=n.pk)
+                dispatch(
+                    current_user,
+                    predict,
+                    current_pathway.pk,
+                    current_pathway.prediction_setting.pk,
+                    node_pk=n.pk,
+                )
+
                 return JsonResponse({"success": current_pathway.url})
 
             return HttpResponseBadRequest()
@@ -2705,6 +2730,24 @@ def setting(request, setting_uuid):
         pass
 
 
+def jobs(request):
+    current_user = _anonymous_or_real(request)
+    context = get_base_context(request)
+
+    if request.method == "GET":
+        context["object_type"] = "joblog"
+        context["breadcrumbs"] = [
+            {"Home": s.SERVER_URL},
+            {"Jobs": s.SERVER_URL + "/jobs"},
+        ]
+        if current_user.is_superuser:
+            context["jobs"] = JobLog.objects.all().order_by("-created")
+        else:
+            context["jobs"] = JobLog.objects.filter(user=current_user).order_by("-created")
+
+        return render(request, "collections/joblog.html", context)
+
+
 ###########
 # KETCHER #
 ###########
templates/collections/joblog.html (new file, 71 lines)
@@ -0,0 +1,71 @@
+{% extends "framework.html" %}
+{% load static %}
+{% load envipytags %}
+{% block content %}
+
+    <div class="panel-group" id="reviewListAccordion">
+        <div class="panel panel-default">
+            <div class="panel-heading" id="headingPanel" style="font-size:2rem;height: 46px">
+                Jobs
+            </div>
+            <div class="panel-body">
+                <p>
+                    Job Logs Desc
+                </p>
+
+            </div>
+
+            <div class="panel panel-default panel-heading list-group-item" style="background-color:silver">
+                <h4 class="panel-title">
+                    <a id="job-accordion-link" data-toggle="collapse" data-parent="#job-accordion" href="#jobs">
+                        Jobs
+                    </a>
+                </h4>
+            </div>
+            <div id="jobs"
+                 class="panel-collapse collapse in">
+                <div class="panel-body list-group-item" id="job-content">
+                    <table class="table table-bordered table-hover">
+                        <tr style="background-color: rgba(0, 0, 0, 0.08);">
+                            <th scope="col">ID</th>
+                            <th scope="col">Name</th>
+                            <th scope="col">Status</th>
+                            <th scope="col">Queued</th>
+                            <th scope="col">Done</th>
+                            <th scope="col">Result</th>
+                        </tr>
+                        <tbody>
+                        {% for job in jobs %}
+                            <tr>
+                                <td>{{ job.task_id }}</td>
+                                <td>{{ job.job_name }}</td>
+                                <td>{{ job.status }}</td>
+                                <td>{{ job.created }}</td>
+                                <td>{{ job.done_at }}</td>
+                                {% if job.task_result and job.task_result|is_url == True %}
+                                    <td><a href="{{ job.task_result }}">Result</a></td>
+                                {% elif job.task_result %}
+                                    <td>{{ job.task_result|slice:"40" }}...</td>
+                                {% else %}
+                                    <td>Empty</td>
+                                {% endif %}
+                            </tr>
+                        {% endfor %}
+                        </tbody>
+                    </table>
+                </div>
+            </div>
+
+            <!-- Unreviewable objects such as User / Group / Setting -->
+            <ul class='list-group'>
+                {% for obj in objects %}
+                    {% if object_type == 'user' %}
+                        <a class="list-group-item" href="{{ obj.url }}">{{ obj.username }}</a>
+                    {% else %}
+                        <a class="list-group-item" href="{{ obj.url }}">{{ obj.name }}</a>
+                    {% endif %}
+                {% endfor %}
+            </ul>
+        </div>
+    </div>
+{% endblock content %}
@@ -18,113 +18,117 @@
         prediction. You just need to set a name and the packages
         you want the object to be based on. There are multiple types of models available.
         For additional information have a look at our
-        <a target="_blank" href="https://wiki.envipath.org/index.php/relative-reasoning" role="button">wiki >></a>
+        <a target="_blank" href="https://wiki.envipath.org/index.php/relative-reasoning" role="button">wiki
+            >></a>
     </div>
+    <!-- Name -->
     <label for="model-name">Name</label>
     <input id="model-name" name="model-name" class="form-control" placeholder="Name"/>
 
+    <!-- Description -->
     <label for="model-description">Description</label>
     <input id="model-description" name="model-description" class="form-control"
            placeholder="Description"/>
 
+    <!-- Model Type -->
     <label for="model-type">Model Type</label>
     <select id="model-type" name="model-type" class="form-control" data-width='100%'>
         <option disabled selected>Select Model Type</option>
         {% for k, v in model_types.items %}
            <option value="{{ v }}">{{ k }}</option>
         {% endfor %}
     </select>
-    <!-- ML and Rule Based Based Form-->
-    <div id="package-based-relative-reasoning-specific-form">
-        <!-- Rule Packages -->
-        <label for="package-based-relative-reasoning-rule-packages">Rule Packages</label>
-        <select id="package-based-relative-reasoning-rule-packages" name="package-based-relative-reasoning-rule-packages"
-                data-actions-box='true' class="form-control" multiple data-width='100%'>
+
+    <!-- Rule Packages -->
+    <div id="rule-packages" class="ep-model-param mlrr rbrr">
+        <label for="model-rule-packages">Rule Packages</label>
+        <select id="model-rule-packages" name="model-rule-packages" data-actions-box='true'
+                class="form-control" multiple data-width='100%'>
             <option disabled>Reviewed Packages</option>
             {% for obj in meta.readable_packages %}
                 {% if obj.reviewed %}
                     <option value="{{ obj.url }}">{{ obj.name }}</option>
                 {% endif %}
             {% endfor %}
 
             <option disabled>Unreviewed Packages</option>
             {% for obj in meta.readable_packages %}
                 {% if not obj.reviewed %}
                     <option value="{{ obj.url }}">{{ obj.name }}</option>
                 {% endif %}
             {% endfor %}
         </select>
-        <!-- Data Packages -->
-        <label for="package-based-relative-reasoning-data-packages" >Data Packages</label>
-        <select id="package-based-relative-reasoning-data-packages" name="package-based-relative-reasoning-data-packages"
-                data-actions-box='true' class="form-control" multiple data-width='100%'>
-            <option disabled>Reviewed Packages</option>
-            {% for obj in meta.readable_packages %}
-                {% if obj.reviewed %}
-                    <option value="{{ obj.url }}">{{ obj.name }}</option>
-                {% endif %}
-            {% endfor %}
-
-            <option disabled>Unreviewed Packages</option>
-            {% for obj in meta.readable_packages %}
-                {% if not obj.reviewed %}
-                    <option value="{{ obj.url }}">{{ obj.name }}</option>
-                {% endif %}
-            {% endfor %}
-        </select>
-
-        <div id="ml-relative-reasoning-specific-form">
-            <!-- Fingerprinter -->
-            <label for="ml-relative-reasoning-fingerprinter">Fingerprinter</label>
-            <select id="ml-relative-reasoning-fingerprinter" name="ml-relative-reasoning-fingerprinter"
-                    class="form-control">
-                <option value="MACCS" selected>MACCS Fingerprinter</option>
-            </select>
-            {% if meta.enabled_features.PLUGINS and additional_descriptors %}
-                <!-- Property Plugins go here -->
-                <label for="ml-relative-reasoning-additional-fingerprinter">Additional Fingerprinter /
-                    Descriptors</label>
-                <select id="ml-relative-reasoning-additional-fingerprinter"
-                        name="ml-relative-reasoning-additional-fingerprinter" class="form-control">
-                    <option disabled selected>Select Additional Fingerprinter / Descriptor</option>
-                    {% for k, v in additional_descriptors.items %}
-                        <option value="{{ v }}">{{ k }}</option>
-                    {% endfor %}
-                </select>
-            {% endif %}
-
-            <label for="ml-relative-reasoning-threshold">Threshold</label>
-            <input type="number" min="0" max="1" step="0.05" value="0.5"
-                   id="ml-relative-reasoning-threshold"
-                   name="ml-relative-reasoning-threshold" class="form-control">
-        </div>
-        {% if meta.enabled_features.APPLICABILITY_DOMAIN %}
-            <!-- Build AD? -->
-            <div class="checkbox">
-                <label>
-                    <input type="checkbox" id="build-app-domain" name="build-app-domain">Also build an
-                    Applicability Domain?
-                </label>
-            </div>
-            <div id="ad-params" style="display:none">
-                <!-- Num Neighbors -->
-                <label for="num-neighbors">Number of Neighbors</label>
-                <input id="num-neighbors" name="num-neighbors" type="number" class="form-control" value="5"
-                       step="1" min="0" max="10">
-                <!-- Local Compatibility -->
-                <label for="local-compatibility-threshold">Local Compatibility Threshold</label>
-                <input id="local-compatibility-threshold" name="local-compatibility-threshold" type="number"
-                       class="form-control" value="0.5" step="0.01" min="0" max="1">
-                <!-- Reliability -->
-                <label for="reliability-threshold">Reliability Threshold</label>
-                <input id="reliability-threshold" name="reliability-threshold" type="number"
-                       class="form-control" value="0.5" step="0.01" min="0" max="1">
-            </div>
-        {% endif %}
     </div>
-    <!-- EnviFormer-->
-    <div id="enviformer-specific-form">
-        <label for="enviformer-threshold">Threshold</label>
-        <input type="number" min="0" max="1" step="0.05" value="0.5" id="enviformer-threshold"
-               name="enviformer-threshold" class="form-control">
+
+    <!-- Data Packages -->
+    <div id="data-packages" class="ep-model-param mlrr rbrr enviformer">
+        <label for="model-data-packages">Data Packages</label>
+        <select id="model-data-packages" name="model-data-packages" data-actions-box='true'
+                class="form-control" multiple data-width='100%'>
+            <option disabled>Reviewed Packages</option>
+            {% for obj in meta.readable_packages %}
+                {% if obj.reviewed %}
+                    <option value="{{ obj.url }}">{{ obj.name }}</option>
+                {% endif %}
+            {% endfor %}
+
+            <option disabled>Unreviewed Packages</option>
+            {% for obj in meta.readable_packages %}
+                {% if not obj.reviewed %}
+                    <option value="{{ obj.url }}">{{ obj.name }}</option>
+                {% endif %}
+            {% endfor %}
+        </select>
+    </div>
+
+    <!-- Fingerprinter -->
+    <div id="fingerprinter" class="ep-model-param mlrr">
+        <label for="model-fingerprinter">Fingerprinter</label>
+        <select id="model-fingerprinter" name="model-fingerprinter" data-actions-box='true'
+                class="form-control" multiple data-width='100%'>
+            <option value="MACCS" selected>MACCS Fingerprinter</option>
+            {% if meta.enabled_features.PLUGINS and additional_descriptors %}
+                <option disabled selected>Select Additional Fingerprinter / Descriptor</option>
+                {% for k, v in additional_descriptors.items %}
+                    <option value="{{ v }}">{{ k }}</option>
+                {% endfor %}
+            {% endif %}
+        </select>
+    </div>
+
+    <!-- Threshold -->
+    <div id="threshold" class="ep-model-param mlrr enviformer">
+        <label for="model-threshold">Threshold</label>
+        <input type="number" min="0" max="1" step="0.05" value="0.5" id="model-threshold"
+               name="model-threshold" class="form-control">
+    </div>
+
+    <div id="appdomain" class="ep-model-param mlrr">
+        {% if meta.enabled_features.APPLICABILITY_DOMAIN %}
+            <!-- Build AD? -->
+            <div class="checkbox">
+                <label>
+                    <input type="checkbox" id="build-app-domain" name="build-app-domain">Also build an
+                    Applicability Domain?
+                </label>
+            </div>
+            <div id="ad-params" style="display:none">
+                <!-- Num Neighbors -->
+                <label for="num-neighbors">Number of Neighbors</label>
+                <input id="num-neighbors" name="num-neighbors" type="number" class="form-control"
+                       value="5"
+                       step="1" min="0" max="10">
+                <!-- Local Compatibility -->
+                <label for="local-compatibility-threshold">Local Compatibility Threshold</label>
+                <input id="local-compatibility-threshold" name="local-compatibility-threshold"
+                       type="number"
+                       class="form-control" value="0.5" step="0.01" min="0" max="1">
+                <!-- Reliability -->
+                <label for="reliability-threshold">Reliability Threshold</label>
+                <input id="reliability-threshold" name="reliability-threshold" type="number"
+                       class="form-control" value="0.5" step="0.01" min="0" max="1">
+            </div>
+        {% endif %}
     </div>
 </form>
 </div>
@@ -137,53 +141,47 @@
 </div>
 
 <script>
-    $(function() {
-        // Initially hide all "specific" forms
-        $("div[id$='-specific-form']").each( function() {
-            $(this).hide();
-        });
-
-        $('#model-type').selectpicker();
-        $("#ml-relative-reasoning-fingerprinter").selectpicker();
-        $("#package-based-relative-reasoning-rule-packages").selectpicker();
-        $("#package-based-relative-reasoning-data-packages").selectpicker();
-        $("#package-based-relative-reasoning-evaluation-packages").selectpicker();
-        if ($('#ml-relative-reasoning-additional-fingerprinter').length > 0) {
-            $("#ml-relative-reasoning-additional-fingerprinter").selectpicker();
-        }
-
-        $("#build-app-domain").change(function () {
-            if ($(this).is(":checked")) {
-                $('#ad-params').show();
-            } else {
-                $('#ad-params').hide();
-            }
-        });
-
-        // On change hide all and show only selected
-        $("#model-type").change(function() {
-            $("div[id$='-specific-form']").each( function() {
-                $(this).hide();
-            });
-            val = $('option:selected', this).val();
-
-            if (val === 'ml-relative-reasoning' || val === 'rule-based-relative-reasoning') {
-                $("#package-based-relative-reasoning-specific-form").show();
-                if (val === 'ml-relative-reasoning') {
-                    $("#ml-relative-reasoning-specific-form").show();
-                }
-            } else {
-                $("#" + val + "-specific-form").show();
-            }
-        });
-
-        $('#new_model_modal_form_submit').on('click', function(e){
-            e.preventDefault();
-            $('#new_model_form').submit();
-        });
-
-    });
-
-
-
+    $(function () {
+        // Built in Model Types
+        var nativeModelTypes = [
+            "mlrr",
+            "rbrr",
+            "enviformer",
+        ]
+
+        // Initially hide all "specific" forms
+        $(".ep-model-param").each(function () {
+            $(this).hide();
+        });
+
+        $('#model-type').selectpicker();
+        $("#model-fingerprinter").selectpicker();
+        $("#model-rule-packages").selectpicker();
+        $("#model-data-packages").selectpicker();
+
+        $("#build-app-domain").change(function () {
+            if ($(this).is(":checked")) {
+                $('#ad-params').show();
+            } else {
+                $('#ad-params').hide();
+            }
+        });
+
+        // On change hide all and show only selected
+        $("#model-type").change(function () {
+            $('.ep-model-param').hide();
+            var modelType = $('#model-type').val();
+            if (nativeModelTypes.indexOf(modelType) !== -1) {
+                $('.' + modelType).show();
+            } else {
+                // do nothing
+            }
+        });
+
+        $('#new_model_modal_form_submit').on('click', function (e) {
+            e.preventDefault();
+            $('#new_model_form').submit();
+        });
+
+    });
 </script>
@@ -17,10 +17,10 @@
             For evaluation, you need to select the packages you want to use.
             While the model is evaluating, you can use the model for predictions.
         </div>
-        <!-- Evaluation -->
-        <label for="relative-reasoning-evaluation-packages">Evaluation Packages</label>
-        <select id="relative-reasoning-evaluation-packages" name=relative-reasoning-evaluation-packages"
-                data-actions-box='true' class="form-control" multiple data-width='100%'>
+        <!-- Evaluation Packages -->
+        <label for="model-evaluation-packages">Evaluation Packages</label>
+        <select id="model-evaluation-packages" name="model-evaluation-packages" data-actions-box='true'
+                class="form-control" multiple data-width='100%'>
             <option disabled>Reviewed Packages</option>
             {% for obj in meta.readable_packages %}
                 {% if obj.reviewed %}
@@ -35,7 +35,16 @@
                 {% endif %}
             {% endfor %}
         </select>
-        <input type="hidden" name="hidden" value="evaluate">
+
+        <!-- Eval Type -->
+        <label for="model-evaluation-type">Evaluation Type</label>
+        <select id="model-evaluation-type" name="model-evaluation-type" class="form-control">
+            <option disabled selected>Select evaluation type</option>
+            <option value="sg">Single Generation</option>
+            <option value="mg">Multiple Generations</option>
+        </select>
+
+        <input type="hidden" name="hidden" value="evaluate">
     </form>
 </div>
 <div class="modal-footer">
@@ -50,7 +59,7 @@
 
     $(function () {
 
-        $("#relative-reasoning-evaluation-packages").selectpicker();
+        $("#model-evaluation-packages").selectpicker();
 
         $('#evaluate_model_form_submit').on('click', function (e) {
             e.preventDefault();
@@ -117,7 +117,7 @@
     <!-- End Predict Panel -->
     {% endif %}
 
-    {% if model.app_domain %}
+    {% if model.ready_for_prediction and model.app_domain %}
     <!-- App Domain -->
     <div class="panel panel-default panel-heading list-group-item" style="background-color:silver">
         <h4 class="panel-title">
@@ -3,7 +3,7 @@ from datetime import datetime
 from tempfile import TemporaryDirectory
 
 from django.test import TestCase, tag
 from epdb.logic import PackageManager
-from epdb.models import User, EnviFormer, Package, Setting, Pathway
+from epdb.models import User, EnviFormer, Package, Setting
 from epdb.tasks import predict_simple, predict
 
 
@@ -48,9 +48,7 @@ class EnviFormerTest(TestCase):
 
         mod.build_dataset()
         mod.build_model()
-        mod.multigen_eval = True
-        mod.save()
-        mod.evaluate_model()
+        mod.evaluate_model(True, eval_packages_objs)
 
         mod.predict("CCN(CC)C(=O)C1=CC(=CC=C1)C")
 
@@ -75,11 +73,15 @@ class EnviFormerTest(TestCase):
 
         # Test pathway prediction
         times = [measure_predict(mods[1], self.BBD_SUBSET.pathways[0].pk) for _ in range(5)]
-        print(f"First pathway prediction took {times[0]} seconds, subsequent ones took {times[1:]}")
+        print(
+            f"First pathway prediction took {times[0]} seconds, subsequent ones took {times[1:]}"
+        )
 
         # Test eviction by performing three prediction with every model, twice.
         times = defaultdict(list)
-        for _ in range(2): # Eviction should cause the second iteration here to have to reload the models
+        for _ in range(
+            2
+        ):  # Eviction should cause the second iteration here to have to reload the models
             for mod in mods:
                 for _ in range(3):
                     times[mod.pk].append(measure_predict(mod))
@@ -30,7 +30,6 @@ class ModelTest(TestCase):
             self.package,
             rule_package_objs,
             data_package_objs,
-            eval_packages_objs,
             threshold=threshold,
             name="ECC - BBD - 0.5",
             description="Created MLRelativeReasoning in Testcase",
@@ -50,9 +49,7 @@ class ModelTest(TestCase):
 
         mod.build_dataset()
         mod.build_model()
-        mod.multigen_eval = True
-        mod.save()
-        mod.evaluate_model()
+        mod.evaluate_model(True, eval_packages_objs)
 
         results = mod.predict("CCN(CC)C(=O)C1=CC(=CC=C1)C")
@@ -6,7 +6,7 @@ from epdb.logic import UserManager
 from epdb.models import Package, User
 
 
-@override_settings(MODEL_DIR=s.FIXTURE_DIRS[0] / "models")
+@override_settings(MODEL_DIR=s.FIXTURE_DIRS[0] / "models", CELERY_TASK_ALWAYS_EAGER=True)
 class PathwayViewTest(TestCase):
     fixtures = ["test_fixtures_incl_model.jsonl.gz"]
 
@@ -6,7 +6,7 @@ from epdb.logic import UserManager, PackageManager
 from epdb.models import Pathway, Edge
 
 
-@override_settings(MODEL_DIR=s.FIXTURE_DIRS[0] / "models")
+@override_settings(MODEL_DIR=s.FIXTURE_DIRS[0] / "models", CELERY_TASK_ALWAYS_EAGER=True)
 class PathwayViewTest(TestCase):
     fixtures = ["test_fixtures_incl_model.jsonl.gz"]