forked from enviPath/enviPy
Compare commits
14 Commits
develop ... enhancemen
| Author | SHA1 | Date |
|---|---|---|
| | 9ec5e433ea | |
| | dddea79daf | |
| | cfd8d7440b | |
| | 6a5413b492 | |
| | 8282855975 | |
| | 09ddd46d69 | |
| | 9f0e396437 | |
| | 5dc4c822c4 | |
| | f1f7ce344c | |
| | 13af49488e | |
| | ac5d370b18 | |
| | ff51e48f90 | |
| | 8166df6f39 | |
| | 2980a75daa | |
@@ -20,16 +20,3 @@ LOG_LEVEL='INFO'
SERVER_URL='http://localhost:8000'
PLUGINS_ENABLED=True
EP_DATA_DIR='data'
EMAIL_HOST_USER='admin@envipath.com'
EMAIL_HOST_PASSWORD='dummy-password'

DEFAULT_FROM_EMAIL="test@test.com"
SERVER_EMAIL='test@test.com'

# Testing settings VScode
DJANGO_SETTINGS_MODULE='envipath.settings'
MANAGE_PY_PATH='./manage.py'

APPLICABILITY_DOMAIN_ENABLED=True
ENVIFORMER_PRESENT=True
MODEL_BUILDING_ENABLED=True
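These values arrive in the process as plain strings; the `envipath/settings.py` hunks later in this diff parse the boolean flags by string comparison. A minimal sketch of that pattern (variable names come from the file above; the parsing mirrors the settings hunks):

```python
import os

# Boolean flags arrive as strings; settings.py compares against "True".
PLUGINS_ENABLED = os.environ.get("PLUGINS_ENABLED", "False") == "True"
ENVIFORMER_PRESENT = os.environ.get("ENVIFORMER_PRESENT", "False") == "True"

# Paths and URLs are used verbatim, with a default as fallback.
EP_DATA_DIR = os.environ.get("EP_DATA_DIR", "data")
SERVER_URL = os.environ.get("SERVER_URL", "http://localhost:8000")
```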
@@ -1,67 +0,0 @@
name: 'Setup enviPy Environment'
description: 'Shared setup for enviPy CI - installs dependencies and prepares environment'

inputs:
  skip-frontend:
    description: 'Skip frontend build steps (pnpm, tailwind)'
    required: false
    default: 'false'
  skip-playwright:
    description: 'Skip Playwright installation'
    required: false
    default: 'false'
  ssh-private-key:
    description: 'SSH private key for git access'
    required: true
  run-migrations:
    description: 'Run Django migrations after setup'
    required: false
    default: 'true'

runs:
  using: "composite"
  steps:
    - name: Setup ssh
      shell: bash
      run: |
        mkdir -p ~/.ssh
        echo "${{ inputs.ssh-private-key }}" > ~/.ssh/id_ed25519
        chmod 600 ~/.ssh/id_ed25519
        ssh-keyscan git.envipath.com >> ~/.ssh/known_hosts
        eval $(ssh-agent -s)
        ssh-add ~/.ssh/id_ed25519

    - name: Setup Python venv
      shell: bash
      run: |
        uv sync --locked --all-extras --dev

    - name: Install Playwright
      if: inputs.skip-playwright == 'false'
      shell: bash
      run: |
        source .venv/bin/activate
        playwright install --with-deps

    - name: Build Frontend
      if: inputs.skip-frontend == 'false'
      shell: bash
      run: |
        uv run python scripts/pnpm_wrapper.py install
        uv run python scripts/pnpm_wrapper.py run build

    - name: Wait for Postgres
      shell: bash
      run: |
        until pg_isready -h postgres -U ${{ env.POSTGRES_USER }}; do
          echo "Waiting for postgres..."
          sleep 2
        done
        echo "Postgres is ready!"

    - name: Run Django Migrations
      if: inputs.run-migrations == 'true'
      shell: bash
      run: |
        source .venv/bin/activate
        python manage.py migrate --noinput
@@ -1,53 +0,0 @@
# Custom CI Docker image for Gitea runners
# Pre-installs Node.js 24, pnpm 10, uv, and system dependencies
# to eliminate setup time in CI workflows

FROM ubuntu:24.04

# Prevent interactive prompts during package installation
ENV DEBIAN_FRONTEND=noninteractive

# Install system dependencies
RUN apt-get update && \
    apt-get install -y \
    postgresql-client \
    redis-tools \
    openjdk-11-jre-headless \
    curl \
    ca-certificates \
    gnupg \
    lsb-release \
    git \
    ssh \
    libxrender1 \
    libxext6 \
    libfontconfig1 \
    libfreetype6 \
    libcairo2 \
    libglib2.0-0t64 \
    && rm -rf /var/lib/apt/lists/*

# Install Node.js 24 via NodeSource
RUN curl -fsSL https://deb.nodesource.com/setup_24.x | bash - && \
    apt-get install -y nodejs && \
    rm -rf /var/lib/apt/lists/*

# Enable corepack and install pnpm 10
RUN corepack enable && \
    corepack prepare pnpm@10 --activate

# Install uv https://docs.astral.sh/uv/guides/integration/docker/#available-images
COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/
ENV PATH="/root/.cargo/bin:${PATH}"

# Verify installations
RUN node --version && \
    npm --version && \
    pnpm --version && \
    uv --version && \
    pg_isready --version && \
    redis-cli --version && \
    java -version

# Set working directory
WORKDIR /workspace
@@ -1,86 +0,0 @@
name: API CI

on:
  pull_request:
    branches:
      - develop
    paths:
      - 'epapi/**'
      - 'epdb/models.py'    # API depends on models
      - 'epdb/logic.py'     # API depends on business logic
      - 'tests/fixtures/**' # API tests use fixtures
  workflow_dispatch:

jobs:
  api-tests:
    if: ${{ !contains(gitea.event.pull_request.title, 'WIP') }}
    runs-on: ubuntu-latest
    container:
      image: git.envipath.com/envipath/envipy-ci:latest

    services:
      postgres:
        image: postgres:16
        env:
          POSTGRES_USER: ${{ vars.POSTGRES_USER }}
          POSTGRES_PASSWORD: ${{ secrets.POSTGRES_PASSWORD }}
          POSTGRES_DB: ${{ vars.POSTGRES_DB }}
        ports:
          - ${{ vars.POSTGRES_PORT }}:5432
        options: >-
          --health-cmd="pg_isready -U postgres"
          --health-interval=10s
          --health-timeout=5s
          --health-retries=5

    env:
      RUNNER_TOOL_CACHE: /toolcache
      EP_DATA_DIR: /opt/enviPy/
      ALLOWED_HOSTS: 127.0.0.1,localhost
      DEBUG: True
      LOG_LEVEL: INFO
      MODEL_BUILDING_ENABLED: True
      APPLICABILITY_DOMAIN_ENABLED: True
      ENVIFORMER_PRESENT: True
      ENVIFORMER_DEVICE: cpu
      FLAG_CELERY_PRESENT: False
      PLUGINS_ENABLED: True
      SERVER_URL: http://localhost:8000
      ADMIN_APPROVAL_REQUIRED: True
      REGISTRATION_MANDATORY: True
      LOG_DIR: ''
      # DB
      POSTGRES_SERVICE_NAME: postgres
      POSTGRES_DB: ${{ vars.POSTGRES_DB }}
      POSTGRES_USER: ${{ vars.POSTGRES_USER }}
      POSTGRES_PASSWORD: ${{ secrets.POSTGRES_PASSWORD }}
      POSTGRES_PORT: 5432
      # SENTRY
      SENTRY_ENABLED: False
      # MS ENTRA
      MS_ENTRA_ENABLED: False

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      # Use shared setup action - skips frontend builds for API-only tests
      - name: Setup enviPy Environment
        uses: ./.gitea/actions/setup-envipy
        with:
          skip-frontend: 'true'
          skip-playwright: 'false'
          ssh-private-key: ${{ secrets.ENVIPY_CI_PRIVATE_KEY }}
          run-migrations: 'true'

      - name: Run API tests
        run: |
          .venv/bin/python manage.py test epapi -v 2

      - name: Test API endpoints availability
        run: |
          .venv/bin/python manage.py runserver 0.0.0.0:8000 &
          SERVER_PID=$!
          sleep 5
          curl -f http://localhost:8000/api/v1/docs || echo "API docs not available"
          kill $SERVER_PID
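The last step is a curl smoke test against the docs route. For anyone who prefers keeping the probe in Python, a standard-library sketch of the same check (the URL matches the workflow; the helper name is invented for the example):

```python
import urllib.request
import urllib.error

def docs_available(url: str = "http://localhost:8000/api/v1/docs") -> bool:
    """Mirror the workflow's `curl -f`: succeed only on a 2xx response."""
    try:
        with urllib.request.urlopen(url, timeout=10) as resp:
            return 200 <= resp.status < 300
    except (urllib.error.URLError, OSError):
        return False

if __name__ == "__main__":
    print("API docs reachable" if docs_available() else "API docs not available")
```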
@@ -1,48 +0,0 @@
name: Build CI Docker Image

on:
  workflow_dispatch:
  push:
    branches:
      - develop
      - main
    paths:
      - '.gitea/docker/Dockerfile.ci'
      - '.gitea/workflows/build-ci-image.yaml'

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to container registry
        uses: docker/login-action@v3
        with:
          registry: git.envipath.com
          username: ${{ secrets.CI_REGISTRY_USER }}
          password: ${{ secrets.CI_REGISTRY_PASSWORD }}

      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: git.envipath.com/envipath/envipy-ci
          tags: |
            type=raw,value=latest
            type=sha,prefix={{branch}}-

      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          file: .gitea/docker/Dockerfile.ci
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=registry,ref=git.envipath.com/envipath/envipy-ci:latest
          cache-to: type=inline
@@ -1,87 +0,0 @@
name: CI

on:
  pull_request:
    branches:
      - develop
  workflow_dispatch:

jobs:
  test:
    if: ${{ !contains(gitea.event.pull_request.title, 'WIP') }}
    runs-on: ubuntu-latest
    container:
      image: git.envipath.com/envipath/envipy-ci:latest

    services:
      postgres:
        image: postgres:16
        env:
          POSTGRES_USER: ${{ vars.POSTGRES_USER }}
          POSTGRES_PASSWORD: ${{ secrets.POSTGRES_PASSWORD }}
          POSTGRES_DB: ${{ vars.POSTGRES_DB }}
        ports:
          - ${{ vars.POSTGRES_PORT }}:5432
        options: >-
          --health-cmd="pg_isready -U postgres"
          --health-interval=10s
          --health-timeout=5s
          --health-retries=5

      #redis:
      #  image: redis:7
      #  ports:
      #    - 6379:6379
      #  options: >-
      #    --health-cmd "redis-cli ping"
      #    --health-interval=10s
      #    --health-timeout=5s
      #    --health-retries=5

    env:
      RUNNER_TOOL_CACHE: /toolcache
      EP_DATA_DIR: /opt/enviPy/
      ALLOWED_HOSTS: 127.0.0.1,localhost
      DEBUG: True
      LOG_LEVEL: INFO
      MODEL_BUILDING_ENABLED: True
      APPLICABILITY_DOMAIN_ENABLED: True
      ENVIFORMER_PRESENT: True
      ENVIFORMER_DEVICE: cpu
      FLAG_CELERY_PRESENT: False
      PLUGINS_ENABLED: True
      SERVER_URL: http://localhost:8000
      ADMIN_APPROVAL_REQUIRED: True
      REGISTRATION_MANDATORY: True
      LOG_DIR: ''
      # DB
      POSTGRES_SERVICE_NAME: postgres
      POSTGRES_DB: ${{ vars.POSTGRES_DB }}
      POSTGRES_USER: ${{ vars.POSTGRES_USER }}
      POSTGRES_PASSWORD: ${{ secrets.POSTGRES_PASSWORD }}
      POSTGRES_PORT: 5432
      # SENTRY
      SENTRY_ENABLED: False
      # MS ENTRA
      MS_ENTRA_ENABLED: False

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      # Use shared setup action - includes all dependencies and migrations
      - name: Setup enviPy Environment
        uses: ./.gitea/actions/setup-envipy
        with:
          skip-frontend: 'false'
          skip-playwright: 'false'
          ssh-private-key: ${{ secrets.ENVIPY_CI_PRIVATE_KEY }}
          run-migrations: 'true'

      - name: Run frontend tests
        run: |
          .venv/bin/python manage.py test --tag frontend

      - name: Run Django tests
        run: |
          .venv/bin/python manage.py test tests --exclude-tag slow --exclude-tag frontend
.gitignore (vendored, 369 changed lines)
@@ -1,375 +1,12 @@

### Python ###

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[codz]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py.cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
*.pyc
db.sqlite3
db.sqlite3-journal
.idea/
static/admin/
static/django_extensions/

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
# pdm.lock
# pdm.toml
.pdm-python
.pdm-build/

# pixi
# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
# pixi.lock
# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
# in the .venv directory. It is recommended not to include this directory in version control.
.pixi

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# Redis
*.rdb
*.aof
*.pid

# RabbitMQ
mnesia/
rabbitmq/
rabbitmq-data/

# ActiveMQ
activemq-data/

# SageMath parsed files
*.sage.py

# Environments
.env
.envrc
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
.idea/

# Abstra
# Abstra is an AI-powered process automation framework.
# Ignore directories containing user credentials, local state, and settings.
# Learn more at https://abstra.io/docs
.abstra/

# Visual Studio Code
# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
# and can be added to the global gitignore or merged into this file. However, if you prefer,
# you could uncomment the following to ignore the entire vscode folder
.vscode/
*.code-workspace

# Ruff stuff:
.ruff_cache/

# UV cache
.uv-cache/

# PyPI configuration file
.pypirc

# Marimo
marimo/_static/
marimo/_lsp/
__marimo__/

# Streamlit
.streamlit/secrets.toml

### Agents ###
.claude/
.codex/
.cursor/
.github/prompts/
.junie/
.windsurf/

AGENTS.md
CLAUDE.md
GEMINI.md
.aider.*

### Node.js ###

# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*

# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json

# Runtime data
pids
*.pid
*.seed
*.pid.lock

# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

# Coverage directory used by tools like istanbul
coverage
*.lcov

# nyc test coverage
.nyc_output

# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt

# Bower dependency directory (https://bower.io/)
bower_components

# node-waf configuration
.lock-wscript

# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release

# Dependency directories
node_modules/
jspm_packages/

# Snowpack dependency directory (https://snowpack.dev/)
web_modules/

# TypeScript cache
*.tsbuildinfo

# Optional npm cache directory
.npm

# Optional eslint cache
.eslintcache

# Optional stylelint cache
.stylelintcache

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# dotenv environment variable files
.env
.env.*
!.env.example

# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache

# Next.js build output
.next
out

# Nuxt.js build / generate output
.nuxt
dist
.output

# Gatsby files
.cache/
# Comment in the public line in if your project uses Gatsby and not Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public

# vuepress build output
.vuepress/dist

# vuepress v2.x temp and cache directory
.temp
.cache

# Sveltekit cache directory
.svelte-kit/

# vitepress build output
**/.vitepress/dist

# vitepress cache directory
**/.vitepress/cache

# Docusaurus cache and generated files
.docusaurus

# Serverless directories
.serverless/

# FuseBox cache
.fusebox/

# DynamoDB Local files
.dynamodb/

# Firebase cache directory
.firebase/

# TernJS port file
.tern-port

# Stores VSCode versions used for testing VSCode extensions
.vscode-test

# yarn v3
.pnp.*
.yarn/*
!.yarn/patches
!.yarn/plugins
!.yarn/releases
!.yarn/sdks
!.yarn/versions

# Vite files
vite.config.js.timestamp-*
vite.config.ts.timestamp-*
.vite/

### Custom ###

debug.log
scratches/
test-results/

data/
*.arff

# Auto generated
static/css/output.css

# macOS system files
.DS_Store
.Trashes
._*
@@ -8,7 +8,6 @@ repos:
      - id: end-of-file-fixer
      - id: check-yaml
      - id: check-added-large-files
        exclude: ^static/images/|fixtures/

  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.13.3
@@ -21,15 +20,6 @@ repos:
      - id: ruff-format
        types_or: [python, pyi]

  - repo: local
    hooks:
      - id: prettier-jinja-templates
        name: Format Jinja templates with Prettier
        entry: pnpm exec prettier --plugin=prettier-plugin-jinja-template --parser=jinja-template --write
        language: system
        types: [file]
        files: ^templates/.*\.html$

  # - repo: local
  #   hooks:
  #     - id: django-check
@@ -1,11 +0,0 @@
{
  "plugins": ["prettier-plugin-jinja-template", "prettier-plugin-tailwindcss"],
  "overrides": [
    {
      "files": "templates/**/*.html",
      "options": {
        "parser": "jinja-template"
      }
    }
  ]
}
README.md (74 changed lines)
@@ -7,12 +7,11 @@ These instructions will guide you through setting up the project for local development
### Prerequisites

- Python 3.11 or later
- [uv](https://github.com/astral-sh/uv) - Python package manager
- **Docker and Docker Compose** - Required for running PostgreSQL database and Redis (for async Celery tasks)
- [uv](https://github.com/astral-sh/uv) - A fast Python package installer and resolver.
- **Docker and Docker Compose** - Required for running the PostgreSQL database.
- Git
- Make

> **Note:** This application requires PostgreSQL (uses `ArrayField`). Docker is the easiest way to run PostgreSQL locally.
> **Note:** This application requires PostgreSQL, which uses `ArrayField`. Docker is the recommended way to run PostgreSQL locally.

### 1. Install Dependencies

@@ -24,12 +23,7 @@ Then, sync the project dependencies. This will create a virtual environment in `
uv sync --dev
```

Note on RDkit installation: if you have rdkit installed on your system globally with a different version of python, the installation will try to link against that and subsequent calls fail. Only option remove global rdkit and rerun sync.

---

The frontend requires `pnpm` to correctly display in development.
[Install it here](https://pnpm.io/installation).
> **Note on RDkit:** If you have a different version of rdkit installed globally, the dependency installation may fail. If this happens, please uninstall the global version and run `uv sync` again.

### 2. Set Up Environment File

@@ -50,7 +44,6 @@ uv run poe setup
```

This single command will:

1. Start the PostgreSQL database using Docker Compose.
2. Run database migrations.
3. Bootstrap initial data (anonymous user, default packages, models).
@@ -61,12 +54,9 @@ After setup, start the development server:
uv run poe dev
```

This will start the css-watcher as well as the django-development server,
The application will be available at `http://localhost:8000`.

**Note:** The development server automatically starts a CSS watcher (`pnpm run dev`) alongside the Django server to rebuild CSS files when changes are detected. This ensures your styles are always up-to-date during development.

#### Other useful Poe commands
#### Other useful Poe commands:

You can list all available commands by running `uv run poe --help`.

@@ -76,50 +66,26 @@ uv run poe db-down # Stop PostgreSQL
uv run poe migrate # Run migrations only
uv run poe bootstrap # Bootstrap data only
uv run poe shell # Open the Django shell
uv run poe build # Build frontend assets and collect static files
uv run poe clean # Remove database volumes (WARNING: destroys all data)
uv run poe celery # Start Celery worker for async task processing
uv run poe celery-dev # Start database and Celery worker
```

### 4. Async Celery Setup (Optional)

By default, Celery tasks run synchronously (`CELERY_TASK_ALWAYS_EAGER = True`), which means prediction tasks block the HTTP request until completion. To enable asynchronous task processing with live status updates on pathway pages (a sketch of the underlying settings toggle follows these steps):

1. **Set the Celery flag in your `.env` file:**

   ```bash
   FLAG_CELERY_PRESENT=True
   ```

2. **Start Redis and Celery worker:**

   ```bash
   uv run poe celery-dev
   ```

3. **Start the development server** (in another terminal):
   ```bash
   uv run poe dev
   ```
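The flag maps directly onto Celery's eager mode: the settings module forces eager execution whenever `FLAG_CELERY_PRESENT` is false, so `.delay()` calls run inline and block (see the `envipath/settings.py` hunk further down in this diff). A minimal sketch of that toggle:

```python
import os

# Celery runs tasks inline (blocking) unless a worker/broker is present.
FLAG_CELERY_PRESENT = os.environ.get("FLAG_CELERY_PRESENT", "False") == "True"
if not FLAG_CELERY_PRESENT:
    # .delay()/.apply_async() then execute synchronously in the request process.
    CELERY_TASK_ALWAYS_EAGER = True
```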
### Troubleshooting

- **Docker Connection Error:** If you see an error like `open //./pipe/dockerDesktopLinuxEngine: The system cannot find the file specified` (on Windows), it likely means your Docker Desktop application is not running. Please start Docker Desktop and try the command again.
* **Docker Connection Error:** If you see an error like `open //./pipe/dockerDesktopLinuxEngine: The system cannot find the file specified` (on Windows), it likely means your Docker Desktop application is not running. Please start Docker Desktop and try the command again.

- **SSH Keys for Git Dependencies:** Some dependencies are installed from private git repositories and require SSH authentication. Ensure your SSH keys are configured correctly for Git.
  - For a general guide, see [GitHub's official documentation](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent).
  - **Windows Users:** If `uv sync` hangs while fetching git dependencies, you may need to explicitly configure Git to use the Windows OpenSSH client and use the `ssh-agent` to manage your key's passphrase.
    1. **Point Git to the correct SSH executable:**
       ```powershell
       git config --global core.sshCommand "C:/Windows/System32/OpenSSH/ssh.exe"
       ```
    2. **Enable and use the SSH agent:**
* **SSH Keys for Git Dependencies:** Some dependencies are installed from private git repositories and require SSH authentication. Ensure your SSH keys are configured correctly for Git.
  * For a general guide, see [GitHub's official documentation](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent).
  * **Windows Users:** If `uv sync` hangs while fetching git dependencies, you may need to explicitly configure Git to use the Windows OpenSSH client and use the `ssh-agent` to manage your key's passphrase.

    ```powershell
    # Run these commands in an administrator PowerShell
    Get-Service ssh-agent | Set-Service -StartupType Automatic -PassThru | Start-Service
    1. **Point Git to the correct SSH executable:**
       ```powershell
       git config --global core.sshCommand "C:/Windows/System32/OpenSSH/ssh.exe"
       ```
    2. **Enable and use the SSH agent:**
       ```powershell
       # Run these commands in an administrator PowerShell
       Get-Service ssh-agent | Set-Service -StartupType Automatic -PassThru | Start-Service

       # Add your key to the agent. It will prompt for the passphrase once.
       ssh-add
       ```
    # Add your key to the agent. It will prompt for the passphrase once.
    ssh-add
    ```
@@ -1,233 +0,0 @@
import enum
from abc import ABC, abstractmethod

from .dto import BuildResult, EnviPyDTO, EvaluationResult, RunResult


class PropertyType(enum.Enum):
    """
    Enumeration representing different types of properties.

    PropertyType is an Enum class that defines categories or types of properties
    based on their weight or nature. It can typically be used when classifying
    objects or entities by their weight classification, such as lightweight or heavy.
    """

    LIGHTWEIGHT = "lightweight"
    HEAVY = "heavy"


class Plugin(ABC):
    """
    Defines an abstract base class Plugin to serve as a blueprint for plugins.

    This class establishes the structure that all plugin implementations must
    follow. It enforces the presence of required methods to ensure consistent
    functionality across all derived classes.
    """

    @abstractmethod
    def identifier(self) -> str:
        pass

    @abstractmethod
    def name(self) -> str:
        """
        Represents an abstract method that provides a contract for implementing a method
        to return a name as a string. Must be implemented in subclasses.
        Name must be unique across all plugins.

        Methods
        -------
        name() -> str
            Abstract method to be defined in subclasses, which returns a string
            representing a name.
        """
        pass

    @abstractmethod
    def display(self) -> str:
        """
        An abstract method that must be implemented by subclasses to display
        specific information or behavior. The method ensures that all subclasses
        provide their own implementation of the display functionality.

        Raises:
            NotImplementedError: Raises this error when the method is not implemented
                in a subclass.

        Returns:
            str: A string used in dropdown menus or other user interfaces to display
        """
        pass


class Property(Plugin):
    @abstractmethod
    def requires_rule_packages(self) -> bool:
        """
        Defines an abstract method to determine whether rule packages are required.

        This method should be implemented by subclasses to specify if they depend
        on rule packages for their functioning.

        Raises:
            NotImplementedError: If the subclass has not implemented this method.

        @return: A boolean indicating if rule packages are required.
        """
        pass

    @abstractmethod
    def requires_data_packages(self) -> bool:
        """
        Defines an abstract method to determine whether data packages are required.

        This method should be implemented by subclasses to specify if they depend
        on data packages for their functioning.

        Raises:
            NotImplementedError: If the subclass has not implemented this method.

        Returns:
            bool: True if the service requires data packages, False otherwise.
        """
        pass

    @abstractmethod
    def get_type(self) -> PropertyType:
        """
        An abstract method that provides the type of property. This method must
        be implemented by subclasses to specify the appropriate property type.

        Raises:
            NotImplementedError: If the method is not implemented by a subclass.

        Returns:
            PropertyType: The type of the property associated with the implementation.
        """
        pass

    def is_heavy(self):
        """
        Determines if the current property type is heavy.

        This method evaluates whether the property type returned from the `get_type()`
        method is classified as `HEAVY`. It utilizes the `PropertyType.HEAVY` constant
        for this comparison.

        Raises:
            AttributeError: If the `get_type()` method is not defined or does not return
                a valid value.

        Returns:
            bool: True if the property type is `HEAVY`, otherwise False.
        """
        return self.get_type() == PropertyType.HEAVY

    @abstractmethod
    def build(self, eP: EnviPyDTO, *args, **kwargs) -> BuildResult | None:
        """
        Abstract method to prepare and construct a specific build process based on the provided
        environment data transfer object (EnviPyDTO). This method should be implemented by
        subclasses to handle the particular requirements of the environment.

        Parameters:
            eP : EnviPyDTO
                The data transfer object containing environment details for the build process.

            *args :
                Additional positional arguments required for the build.

            **kwargs :
                Additional keyword arguments to offer flexibility and customization for
                the build process.

        Returns:
            BuildResult | None
                Returns a BuildResult instance if the build operation succeeds, else returns None.

        Raises:
            NotImplementedError
                If the method is not implemented in a subclass.
        """
        pass

    @abstractmethod
    def run(self, eP: EnviPyDTO, *args, **kwargs) -> RunResult:
        """
        Represents an abstract base class for executing a generic process with
        provided parameters and returning a standardized result.

        Attributes:
            None.

        Methods:
            run(eP, *args, **kwargs):
                Executes a task with specified input parameters and optional
                arguments, returning the outcome in the form of a RunResult object.
                This is an abstract method and must be implemented in subclasses.

        Raises:
            NotImplementedError: If the subclass does not implement the abstract
                method.

        Parameters:
            eP (EnviPyDTO): The primary object containing information or data required
                for processing. Mandatory.
            *args: Variable length argument list for additional positional arguments.
            **kwargs: Arbitrary keyword arguments for additional options or settings.

        Returns:
            RunResult: A result object encapsulating the status, output, or details
                of the process execution.
        """
        pass

    @abstractmethod
    def evaluate(self, eP: EnviPyDTO, *args, **kwargs) -> EvaluationResult:
        """
        Abstract method for evaluating data based on the given input and additional arguments.

        This method is intended to be implemented by subclasses and provides
        a mechanism to perform an evaluation procedure based on input encapsulated
        in an EnviPyDTO object.

        Parameters:
            eP : EnviPyDTO
                The data transfer object containing necessary input for evaluation.
            *args : tuple
                Additional positional arguments for the evaluation process.
            **kwargs : dict
                Additional keyword arguments for the evaluation process.

        Returns:
            EvaluationResult
                The result of the evaluation performed by the method.

        Raises:
            NotImplementedError
                If the method is not implemented in the subclass.
        """
        pass

    @abstractmethod
    def build_and_evaluate(self, eP: EnviPyDTO, *args, **kwargs) -> EvaluationResult:
        """
        An abstract method designed to build and evaluate a model or system using the provided
        environmental parameters and additional optional arguments.

        Args:
            eP (EnviPyDTO): The environmental parameters required for building and evaluating.
            *args: Additional positional arguments.
            **kwargs: Additional keyword arguments.

        Returns:
            EvaluationResult: The result of the evaluation process.

        Raises:
            NotImplementedError: If the method is not implemented by a subclass.
        """
        pass
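To make the removed contract concrete, a hypothetical subclass could look like the sketch below. The class name and the trivial result payloads are invented for illustration; only the method names and types come from the ABCs above, and the result classes are assumed to accept the fields declared in `bridge/dto.py`:

```python
class ExampleProperty(Property):
    """Hypothetical plugin; everything here is illustrative, not from the codebase."""

    def identifier(self) -> str:
        return "example-property"

    def name(self) -> str:
        return "example"  # must be unique across all plugins

    def display(self) -> str:
        return "Example property"  # shown in dropdown menus

    def requires_rule_packages(self) -> bool:
        return False

    def requires_data_packages(self) -> bool:
        return False

    def get_type(self) -> PropertyType:
        return PropertyType.LIGHTWEIGHT  # so is_heavy() returns False

    def build(self, eP: EnviPyDTO, *args, **kwargs) -> BuildResult | None:
        return None  # nothing to pre-compute for a lightweight property

    def run(self, eP: EnviPyDTO, *args, **kwargs) -> RunResult:
        # A real plugin would derive results from eP.get_compounds().
        return RunResult(producer=eP.get_context().url, result=[])

    def evaluate(self, eP: EnviPyDTO, *args, **kwargs) -> EvaluationResult:
        return EvaluationResult(data=None)

    def build_and_evaluate(self, eP: EnviPyDTO, *args, **kwargs) -> EvaluationResult:
        self.build(eP, *args, **kwargs)
        return self.evaluate(eP, *args, **kwargs)
```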
bridge/dto.py (140 changed lines)
@@ -1,140 +0,0 @@
from dataclasses import dataclass
from typing import Any, List, Optional, Protocol

from envipy_additional_information import EnviPyModel, register
from pydantic import HttpUrl

from utilities.chem import FormatConverter, ProductSet


@dataclass(frozen=True, slots=True)
class Context:
    uuid: str
    url: str
    work_dir: str


class CompoundProto(Protocol):
    url: str | None
    name: str | None
    smiles: str


class RuleProto(Protocol):
    url: str
    name: str

    def apply(self, smiles, *args, **kwargs): ...


class ReactionProto(Protocol):
    url: str
    name: str
    rules: List[RuleProto]


class EnviPyDTO(Protocol):
    def get_context(self) -> Context: ...

    def get_compounds(self) -> List[CompoundProto]: ...

    def get_reactions(self) -> List[ReactionProto]: ...

    def get_rules(self) -> List[RuleProto]: ...

    @staticmethod
    def standardize(smiles, remove_stereo=False, canonicalize_tautomers=False): ...

    @staticmethod
    def apply(
        smiles: str,
        smirks: str,
        preprocess_smiles: bool = True,
        bracketize: bool = True,
        standardize: bool = True,
        kekulize: bool = True,
        remove_stereo: bool = True,
        reactant_filter_smarts: str | None = None,
        product_filter_smarts: str | None = None,
    ) -> List["ProductSet"]: ...


class PredictedProperty(EnviPyModel):
    pass


@register("buildresult")
class BuildResult(EnviPyModel):
    data: dict[str, Any] | List[dict[str, Any]] | None


@register("runresult")
class RunResult(EnviPyModel):
    producer: HttpUrl
    description: Optional[str] = None
    result: PredictedProperty | List[PredictedProperty]


@register("evaluationresult")
class EvaluationResult(EnviPyModel):
    data: dict[str, Any] | List[dict[str, Any]] | None


class BaseDTO(EnviPyDTO):
    def __init__(
        self,
        uuid: str,
        url: str,
        work_dir: str,
        compounds: List[CompoundProto],
        reactions: List[ReactionProto],
        rules: List[RuleProto],
    ):
        self.uuid = uuid
        self.url = url
        self.work_dir = work_dir
        self.compounds = compounds
        self.reactions = reactions
        self.rules = rules

    def get_context(self) -> Context:
        return Context(uuid=self.uuid, url=self.url, work_dir=self.work_dir)

    def get_compounds(self) -> List[CompoundProto]:
        return self.compounds

    def get_reactions(self) -> List[ReactionProto]:
        return self.reactions

    def get_rules(self) -> List[RuleProto]:
        return self.rules

    @staticmethod
    def standardize(smiles, remove_stereo=False, canonicalize_tautomers=False):
        return FormatConverter.standardize(
            smiles, remove_stereo=remove_stereo, canonicalize_tautomers=canonicalize_tautomers
        )

    @staticmethod
    def apply(
        smiles: str,
        smirks: str,
        preprocess_smiles: bool = True,
        bracketize: bool = True,
        standardize: bool = True,
        kekulize: bool = True,
        remove_stereo: bool = True,
        reactant_filter_smarts: str | None = None,
        product_filter_smarts: str | None = None,
    ) -> List["ProductSet"]:
        return FormatConverter.apply(
            smiles,
            smirks,
            preprocess_smiles,
            bracketize,
            standardize,
            kekulize,
            remove_stereo,
            reactant_filter_smarts,
            product_filter_smarts,
        )
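Usage-wise, `BaseDTO` simply bundles the protocol pieces and delegates the chemistry helpers to `FormatConverter`. A sketch with placeholder values (the uuid, URL, and work directory are invented):

```python
# Illustrative only: all values below are placeholders.
dto = BaseDTO(
    uuid="00000000-0000-0000-0000-000000000000",
    url="http://localhost:8000/package/example",
    work_dir="/tmp/envipy-example",
    compounds=[],
    reactions=[],
    rules=[],
)

ctx = dto.get_context()                    # frozen Context(uuid, url, work_dir)
smiles = BaseDTO.standardize("c1ccccc1")   # delegates to FormatConverter.standardize
```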
@@ -1,6 +1,6 @@
services:
  db:
    image: postgres:18
    image: postgres:15
    container_name: envipath-postgres
    environment:
      POSTGRES_USER: postgres
@@ -9,18 +9,12 @@ services:
    ports:
      - "5432:5432"
    volumes:
      - postgres_data:/var/lib/postgresql
      - postgres_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 5s
      timeout: 5s
      retries: 5

  redis:
    image: redis:7-alpine
    container_name: envipath-redis
    ports:
      - "6379:6379"

volumes:
  postgres_data:
@@ -1,4 +1,4 @@
from epapi.v1.router import router as v1_router  # Refactored API from epdb.api_v2
from epdb.api import router as epdb_app_router
from epdb.legacy_api import router as epdb_legacy_app_router
from ninja import NinjaAPI

@@ -8,5 +8,5 @@ api_v1 = NinjaAPI(title="API V1 Docs", urls_namespace="api-v1")
api_legacy = NinjaAPI(title="Legacy API Docs", urls_namespace="api-legacy")

# Add routers
api_v1.add_router("/", v1_router)
api_v1.add_router("/", epdb_app_router)
api_legacy.add_router("/", epdb_legacy_app_router)
@@ -14,15 +14,14 @@ import os
from pathlib import Path

from dotenv import load_dotenv
from envipy_plugins import Classifier, Property, Descriptor
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

ENV_PATH = os.environ.get("ENV_PATH", BASE_DIR / ".env")
print(f"Loading env from {ENV_PATH}")
load_dotenv(ENV_PATH, override=False)
load_dotenv(BASE_DIR / ".env", override=False)

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.2/howto/deployment/checklist/
@@ -49,25 +48,10 @@ INSTALLED_APPS = [
    "django_extensions",
    "oauth2_provider",
    # Custom
    "epapi",  # API endpoints (v1, etc.)
    "epdb",
    "migration",
]

TENANT = os.environ.get("TENANT", "public")

if TENANT != "public":
    INSTALLED_APPS.append(TENANT)

EPDB_PACKAGE_MODEL = os.environ.get("EPDB_PACKAGE_MODEL", "epdb.Package")


def GET_PACKAGE_MODEL():
    from django.apps import apps

    return apps.get_model(EPDB_PACKAGE_MODEL)


AUTHENTICATION_BACKENDS = [
    "django.contrib.auth.backends.ModelBackend",
]
@@ -92,19 +76,10 @@ if os.environ.get("REGISTRATION_MANDATORY", False) == "True":

ROOT_URLCONF = "envipath.urls"

TEMPLATE_DIRS = [
    os.path.join(BASE_DIR, "templates"),
]

# If we have a non-public tenant, we might need to overwrite some templates
# search TENANT folder first...
if TENANT != "public":
    TEMPLATE_DIRS.insert(0, os.path.join(BASE_DIR, TENANT, "templates"))

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": TEMPLATE_DIRS,
        "DIRS": (os.path.join(BASE_DIR, "templates"),),
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
@@ -112,14 +87,11 @@ TEMPLATES = [
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
                "epdb.context_processors.package_context",
            ],
        },
    },
]

ALLOWED_HTML_TAGS = {"b", "i", "u", "br", "em", "mark", "p", "s", "strong"}

WSGI_APPLICATION = "envipath.wsgi.application"

# Database
@@ -136,13 +108,6 @@ DATABASES = {
    }
}

if os.environ.get("USE_TEMPLATE_DB", False) == "True":
    DATABASES["default"]["TEST"] = {
        "NAME": f"test_{os.environ['TEMPLATE_DB']}",
        "TEMPLATE": os.environ["TEMPLATE_DB"],
    }


# Password validation
# https://docs.djangoproject.com/en/4.2/ref/settings/#auth-password-validators

@@ -216,12 +181,6 @@ if not os.path.exists(LOG_DIR):
    os.mkdir(LOG_DIR)

PLUGIN_DIR = os.path.join(EP_DATA_DIR, "plugins")

API_PAGINATION_DEFAULT_PAGE_SIZE = int(os.environ.get("API_PAGINATION_DEFAULT_PAGE_SIZE", 50))
PAGINATION_MAX_PER_PAGE_SIZE = int(
    os.environ.get("API_PAGINATION_MAX_PAGE_SIZE", 100)
)  # Ninja override

if not os.path.exists(PLUGIN_DIR):
    os.mkdir(PLUGIN_DIR)

@@ -284,7 +243,6 @@ LOGGING = {
ENVIFORMER_PRESENT = os.environ.get("ENVIFORMER_PRESENT", "False") == "True"
ENVIFORMER_DEVICE = os.environ.get("ENVIFORMER_DEVICE", "cpu")


# If celery is not present set always eager to true which will cause delayed tasks to block until finished
FLAG_CELERY_PRESENT = os.environ.get("FLAG_CELERY_PRESENT", "False") == "True"
if not FLAG_CELERY_PRESENT:
@@ -326,19 +284,22 @@ DEFAULT_MODEL_PARAMS = {
    "num_chains": 10,
}

DEFAULT_MAX_NUMBER_OF_NODES = 50
DEFAULT_MAX_DEPTH = 8
DEFAULT_MAX_NUMBER_OF_NODES = 30
DEFAULT_MAX_DEPTH = 5
DEFAULT_MODEL_THRESHOLD = 0.25

# Loading Plugins
PLUGINS_ENABLED = os.environ.get("PLUGINS_ENABLED", "False") == "True"
BASE_PLUGINS = [
    "pepper.PEPPER",
]
if PLUGINS_ENABLED:
    from utilities.plugin import discover_plugins

    CLASSIFIER_PLUGINS = {}
    PROPERTY_PLUGINS = {}
    DESCRIPTOR_PLUGINS = {}
    CLASSIFIER_PLUGINS = discover_plugins(_cls=Classifier)
    PROPERTY_PLUGINS = discover_plugins(_cls=Property)
    DESCRIPTOR_PLUGINS = discover_plugins(_cls=Descriptor)
else:
    CLASSIFIER_PLUGINS = {}
    PROPERTY_PLUGINS = {}
    DESCRIPTOR_PLUGINS = {}

SENTRY_ENABLED = os.environ.get("SENTRY_ENABLED", "False") == "True"
if SENTRY_ENABLED:
@@ -376,21 +337,12 @@ FLAGS = {
# -> /password_reset/done is covered as well
LOGIN_EXEMPT_URLS = [
    "/register",
    "/api/v1/",  # Let API handle its own authentication
    "/api/legacy/",
    "/o/token/",
    "/o/userinfo/",
    "/password_reset/",
    "/reset/",
    "/microsoft/",
    "/terms",
    "/privacy",
    "/cookie-policy",
    "/about",
    "/contact",
    "/careers",
    "/cite",
    "/legal",
]

# MS AD/Entra
@@ -408,9 +360,3 @@ if MS_ENTRA_ENABLED:

# Site ID 10 -> beta.envipath.org
MATOMO_SITE_ID = os.environ.get("MATOMO_SITE_ID", "10")

# CAP
CAP_ENABLED = os.environ.get("CAP_ENABLED", "False") == "True"
CAP_API_BASE = os.environ.get("CAP_API_BASE", None)
CAP_SITE_KEY = os.environ.get("CAP_SITE_KEY", None)
CAP_SECRET_KEY = os.environ.get("CAP_SECRET_KEY", None)
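The `EPDB_PACKAGE_MODEL` / `GET_PACKAGE_MODEL()` pair added in this file follows Django's swappable-model pattern: the concrete model is resolved lazily through the app registry, so a non-public tenant app can substitute its own `Package`. A sketch of a call site, assuming only the settings shown above:

```python
from django.conf import settings

def count_packages() -> int:
    # Resolve the concrete model at call time, once the app registry is ready.
    Package = settings.GET_PACKAGE_MODEL()
    return Package.objects.count()
```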
@@ -23,20 +23,12 @@ from .api import api_v1, api_legacy

urlpatterns = [
    path("", include("epdb.urls")),
    path("", include("migration.urls")),
    path("admin/", admin.site.urls),
    path("api/v1/", api_v1.urls),
    path("api/legacy/", api_legacy.urls),
    path("o/", include("oauth2_provider.urls", namespace="oauth2_provider")),
]

if "migration" in s.INSTALLED_APPS:
    urlpatterns.append(path("", include("migration.urls")))

if s.MS_ENTRA_ENABLED:
    urlpatterns.append(path("", include("epauth.urls")))

# Custom error handlers
handler400 = "epdb.views.handler400"
handler403 = "epdb.views.handler403"
handler404 = "epdb.views.handler404"
handler500 = "epdb.views.handler500"
@@ -1,6 +0,0 @@
from django.apps import AppConfig


class EpapiConfig(AppConfig):
    default_auto_field = "django.db.models.BigAutoField"
    name = "epapi"
@@ -1 +0,0 @@
# Tests for epapi app
@@ -1 +0,0 @@
"""Tests for epapi utility modules."""
@@ -1,218 +0,0 @@
"""
Tests for validation error utilities.

Tests the format_validation_error() and handle_validation_error() functions
that transform Pydantic validation errors into user-friendly messages.
"""

from django.test import TestCase, tag
import json
from pydantic import BaseModel, ValidationError, field_validator
from typing import Literal

from ninja.errors import HttpError
from epapi.utils.validation_errors import format_validation_error, handle_validation_error


@tag("api", "utils")
class ValidationErrorUtilityTests(TestCase):
    """Test validation error utility functions."""

    def test_format_missing_field_error(self):
        """Test formatting of missing required field error."""

        # Create a model with required field
        class TestModel(BaseModel):
            required_field: str

        # Trigger validation error
        try:
            TestModel()
        except ValidationError as e:
            errors = e.errors()
            self.assertEqual(len(errors), 1)
            formatted = format_validation_error(errors[0])
            self.assertEqual(formatted, "This field is required")

    def test_format_enum_error(self):
        """Test formatting of enum validation error."""

        class TestModel(BaseModel):
            status: Literal["active", "inactive"]

        try:
            TestModel(status="invalid")
        except ValidationError as e:
            errors = e.errors()
            self.assertEqual(len(errors), 1)
            formatted = format_validation_error(errors[0])
            # Literal errors get formatted as "Please enter ..." with the valid options
            self.assertIn("Please enter", formatted)
            self.assertIn("active", formatted)
            self.assertIn("inactive", formatted)

    def test_format_type_errors(self):
        """Test formatting of type validation errors (string, int, float)."""
        test_cases = [
            # (field_type, invalid_value, expected_message)
            # Note: We don't check exact error_type as Pydantic may use different types
            # (e.g., int_type vs int_parsing) but we verify the formatted message is correct
            (str, 123, "Please enter a valid string"),
            (int, "not_a_number", "Please enter a valid int"),
            (float, "not_a_float", "Please enter a valid float"),
        ]

        for field_type, invalid_value, expected_message in test_cases:
            with self.subTest(field_type=field_type.__name__):

                class TestModel(BaseModel):
                    field: field_type

                try:
                    TestModel(field=invalid_value)
                except ValidationError as e:
                    errors = e.errors()
                    self.assertEqual(len(errors), 1)
                    formatted = format_validation_error(errors[0])
                    self.assertEqual(formatted, expected_message)

    def test_format_value_error(self):
        """Test formatting of value error from custom validator."""

        class TestModel(BaseModel):
            age: int

            @field_validator("age")
            @classmethod
            def validate_age(cls, v):
                if v < 0:
                    raise ValueError("Age must be positive")
                return v

        try:
            TestModel(age=-5)
        except ValidationError as e:
            errors = e.errors()
            self.assertEqual(len(errors), 1)
            formatted = format_validation_error(errors[0])
            self.assertEqual(formatted, "Age must be positive")

    def test_format_unknown_error_type_fallback(self):
        """Test that unknown error types fall back to default formatting."""
        # Mock an error with an unknown type
        mock_error = {
            "type": "unknown_custom_type",
            "msg": "Input should be a valid email address",
            "ctx": {},
        }

        formatted = format_validation_error(mock_error)
        # Should use the else branch which does replacements on the message
        self.assertEqual(formatted, "Please enter a valid email address")

    def test_handle_validation_error_structure(self):
        """Test that handle_validation_error raises HttpError with correct structure."""

        class TestModel(BaseModel):
            name: str
            count: int

        try:
            TestModel(name=123, count="invalid")
        except ValidationError as e:
            # handle_validation_error should raise HttpError
            with self.assertRaises(HttpError) as context:
                handle_validation_error(e)

            http_error = context.exception
            self.assertEqual(http_error.status_code, 400)

            # Parse the JSON from the error message
            error_data = json.loads(http_error.message)

            # Check structure
            self.assertEqual(error_data["type"], "validation_error")
            self.assertIn("field_errors", error_data)
            self.assertIn("message", error_data)
            self.assertEqual(error_data["message"], "Please correct the errors below")

            # Check that both fields have errors
            self.assertIn("name", error_data["field_errors"])
            self.assertIn("count", error_data["field_errors"])

    def test_handle_validation_error_no_pydantic_internals(self):
        """Test that handle_validation_error doesn't expose Pydantic internals."""

        class TestModel(BaseModel):
            email: str

        try:
            TestModel(email=123)
        except ValidationError as e:
            with self.assertRaises(HttpError) as context:
                handle_validation_error(e)

            http_error = context.exception
            error_data = json.loads(http_error.message)
            error_str = json.dumps(error_data)

            # Ensure no Pydantic internals are exposed
            self.assertNotIn("pydantic", error_str.lower())
            self.assertNotIn("https://errors.pydantic.dev", error_str)
            self.assertNotIn("loc", error_str)

    def test_handle_validation_error_user_friendly_messages(self):
        """Test that all error messages are user-friendly."""

        class TestModel(BaseModel):
            name: str
            age: int
            status: Literal["active", "inactive"]

        try:
            TestModel(name=123, status="invalid")  # Multiple errors
        except ValidationError as e:
            with self.assertRaises(HttpError) as context:
                handle_validation_error(e)

            http_error = context.exception
            error_data = json.loads(http_error.message)

            # All messages should be user-friendly (contain "Please" or "This field")
            for field, messages in error_data["field_errors"].items():
                for message in messages:
                    # User-friendly messages start with "Please" or "This field"
                    self.assertTrue(
                        message.startswith("Please") or message.startswith("This field"),
                        f"Message '{message}' is not user-friendly",
                    )

    def test_handle_validation_error_multiple_errors_same_field(self):
        """Test handling multiple validation errors for the same field."""

        class TestModel(BaseModel):
            value: int

            @field_validator("value")
            @classmethod
            def validate_range(cls, v):
                if v < 0:
                    raise ValueError("Must be non-negative")
                if v > 100:
                    raise ValueError("Must be at most 100")
                return v

        # Test with string (type error) - this will fail before the validator runs
        try:
            TestModel(value="invalid")
        except ValidationError as e:
            with self.assertRaises(HttpError) as context:
                handle_validation_error(e)

            http_error = context.exception
            error_data = json.loads(http_error.message)

            # Should have error for 'value' field
            self.assertIn("value", error_data["field_errors"])
            self.assertIsInstance(error_data["field_errors"]["value"], list)
            self.assertGreater(len(error_data["field_errors"]["value"]), 0)
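The removed tests pin down the behavior of `format_validation_error` without its body appearing in this diff. A reconstruction consistent with what the tests assert — this is a sketch, not the actual implementation from `epapi/utils/validation_errors.py`:

```python
def format_validation_error(error: dict) -> str:
    """Turn one Pydantic error dict into a user-facing message (reconstruction)."""
    err_type = error.get("type", "")
    if err_type == "missing":
        return "This field is required"
    if err_type == "literal_error":
        # Include the valid options so "active"/"inactive" appear in the message.
        allowed = error.get("ctx", {}).get("expected", "")
        return f"Please enter one of: {allowed}"
    if err_type.startswith(("string_", "int_", "float_")):
        # Covers both *_type and *_parsing variants the tests mention.
        kind = err_type.split("_", 1)[0]
        return f"Please enter a valid {kind}"
    if err_type == "value_error":
        # Custom validators: surface the ValueError message as-is.
        return str(error.get("msg", "")).removeprefix("Value error, ")
    # Fallback: soften Pydantic's default wording for unknown error types.
    return str(error.get("msg", "")).replace("Input should be", "Please enter")
```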
@@ -1 +0,0 @@
# Tests for epapi v1 API
@ -1,446 +0,0 @@
|
||||
"""
|
||||
Tests for Additional Information API endpoints.
|
||||
|
||||
Tests CRUD operations on scenario additional information including the new PATCH endpoint.
|
||||
"""
|
||||
|
||||
from django.test import TestCase, tag
|
||||
import json
|
||||
from uuid import uuid4
|
||||
|
||||
from epdb.logic import PackageManager, UserManager
|
||||
from epdb.models import Scenario
|
||||
|
||||
|
||||
@tag("api", "additional_information")
|
||||
class AdditionalInformationAPITests(TestCase):
|
||||
"""Test additional information API endpoints."""
|
||||
|
||||
@classmethod
|
||||
def setUpTestData(cls):
|
||||
"""Set up test data: user, package, and scenario."""
|
||||
cls.user = UserManager.create_user(
|
||||
"ai-test-user",
|
||||
"ai-test@envipath.com",
|
||||
"SuperSafe",
|
||||
set_setting=False,
|
||||
add_to_group=False,
|
||||
is_active=True,
|
||||
)
|
||||
cls.other_user = UserManager.create_user(
|
||||
"ai-other-user",
|
||||
"ai-other@envipath.com",
|
||||
"SuperSafe",
|
||||
set_setting=False,
|
||||
add_to_group=False,
|
||||
is_active=True,
|
||||
)
|
||||
cls.package = PackageManager.create_package(
|
||||
cls.user, "AI Test Package", "Test package for additional information"
|
||||
)
|
||||
# Package owned by other_user (no access for cls.user)
|
||||
cls.other_package = PackageManager.create_package(
|
||||
cls.other_user, "Other Package", "Package without access"
|
||||
)
|
||||
# Create a scenario for testing
|
||||
cls.scenario = Scenario.objects.create(
|
||||
package=cls.package,
|
||||
name="Test Scenario",
|
||||
description="Test scenario for additional information tests",
|
||||
scenario_type="biodegradation",
|
||||
scenario_date="2024-01-01",
|
||||
)
|
||||
cls.other_scenario = Scenario.objects.create(
|
||||
package=cls.other_package,
|
||||
name="Other Scenario",
|
||||
description="Scenario in package without access",
|
||||
scenario_type="biodegradation",
|
||||
scenario_date="2024-01-01",
|
||||
)
|
||||
|
||||
def test_list_all_schemas(self):
|
||||
"""Test GET /api/v1/information/schema/ returns all schemas."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
response = self.client.get("/api/v1/information/schema/")
|
||||
|
||||
self.assertEqual(response.status_code, 200)
|
||||
data = response.json()
|
||||
self.assertIsInstance(data, dict)
|
||||
# Should have multiple schemas
|
||||
self.assertGreater(len(data), 0)
|
||||
# Each schema should have RJSF format
|
||||
for name, schema in data.items():
|
||||
self.assertIn("schema", schema)
|
||||
self.assertIn("uiSchema", schema)
|
||||
self.assertIn("formData", schema)
|
||||
self.assertIn("groups", schema)
|
||||
|
||||
def test_get_specific_schema(self):
|
||||
"""Test GET /api/v1/information/schema/{model_name}/ returns specific schema."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# Assuming 'temperature' is a valid model
|
||||
response = self.client.get("/api/v1/information/schema/temperature/")
|
||||
|
||||
self.assertEqual(response.status_code, 200)
|
||||
data = response.json()
|
||||
self.assertIn("schema", data)
|
||||
self.assertIn("uiSchema", data)
|
||||
|
||||
def test_get_nonexistent_schema_returns_404(self):
|
||||
"""Test GET for non-existent schema returns 404."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
response = self.client.get("/api/v1/information/schema/nonexistent/")
|
||||
|
||||
self.assertEqual(response.status_code, 404)
|
||||
|
||||
def test_list_scenario_information_empty(self):
|
||||
"""Test GET /api/v1/scenario/{uuid}/information/ returns empty list initially."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
response = self.client.get(f"/api/v1/scenario/{self.scenario.uuid}/information/")
|
||||
|
||||
self.assertEqual(response.status_code, 200)
|
||||
data = response.json()
|
||||
self.assertIsInstance(data, list)
|
||||
self.assertEqual(len(data), 0)
|
||||
|
||||
def test_create_additional_information(self):
|
||||
"""Test POST creates additional information."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# Create temperature information (assuming temperature model exists)
|
||||
payload = {"interval": {"start": 20, "end": 25}}
|
||||
response = self.client.post(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/temperature/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 200)
|
||||
data = response.json()
|
||||
self.assertEqual(data["status"], "created")
|
||||
self.assertIn("uuid", data)
|
||||
self.assertIsNotNone(data["uuid"])
|
||||
|
||||
def test_create_with_invalid_data_returns_400(self):
|
||||
"""Test POST with invalid data returns 400 with validation errors."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# Invalid data (missing required fields or wrong types)
|
||||
payload = {"invalid_field": "value"}
|
||||
response = self.client.post(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/temperature/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 400)
|
||||
data = response.json()
|
||||
# Should have validation error details in 'detail' field
|
||||
self.assertIn("detail", data)
|
||||
|
||||
def test_validation_errors_are_user_friendly(self):
|
||||
"""Test that validation errors are user-friendly and field-specific."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# Invalid data - wrong type (string instead of number in interval)
|
||||
payload = {"interval": {"start": "not_a_number", "end": 25}}
|
||||
response = self.client.post(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/temperature/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 400)
|
||||
data = response.json()
|
||||
|
||||
# Parse the error response - Django Ninja wraps errors in 'detail'
|
||||
error_str = data.get("detail") or data.get("error")
|
||||
self.assertIsNotNone(error_str, "Response should contain error details")
|
||||
|
||||
# Parse the JSON error string
|
||||
error_data = json.loads(error_str)
|
||||
|
||||
# Check structure
|
||||
self.assertEqual(error_data.get("type"), "validation_error")
|
||||
self.assertIn("field_errors", error_data)
|
||||
self.assertIn("message", error_data)
|
||||
|
||||
# Ensure error messages are user-friendly (no Pydantic URLs or technical jargon)
|
||||
error_str = json.dumps(error_data)
|
||||
self.assertNotIn("pydantic", error_str.lower())
|
||||
self.assertNotIn("https://errors.pydantic.dev", error_str)
|
||||
self.assertNotIn("loc", error_str) # No technical field like 'loc'
|
||||
|
||||
# Check that error message is helpful
|
||||
self.assertIn("Please", error_data["message"]) # User-friendly language
|
||||
|
||||
def test_patch_additional_information(self):
|
||||
"""Test PATCH updates existing additional information."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# First create an item
|
||||
create_payload = {"interval": {"start": 20, "end": 25}}
|
||||
create_response = self.client.post(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/temperature/",
|
||||
data=json.dumps(create_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
item_uuid = create_response.json()["uuid"]
|
||||
|
||||
# Then update it with PATCH
|
||||
update_payload = {"interval": {"start": 30, "end": 35}}
|
||||
patch_response = self.client.patch(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/item/{item_uuid}/",
|
||||
data=json.dumps(update_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(patch_response.status_code, 200)
|
||||
data = patch_response.json()
|
||||
self.assertEqual(data["status"], "updated")
|
||||
self.assertEqual(data["uuid"], item_uuid) # UUID preserved
|
||||
|
||||
# Verify the data was updated
|
||||
list_response = self.client.get(f"/api/v1/scenario/{self.scenario.uuid}/information/")
|
||||
items = list_response.json()
|
||||
self.assertEqual(len(items), 1)
|
||||
updated_item = items[0]
|
||||
self.assertEqual(updated_item["uuid"], item_uuid)
|
||||
self.assertEqual(updated_item["data"]["interval"]["start"], 30)
|
||||
self.assertEqual(updated_item["data"]["interval"]["end"], 35)
|
||||
|
||||
def test_patch_nonexistent_item_returns_404(self):
|
||||
"""Test PATCH on non-existent item returns 404."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
fake_uuid = str(uuid4())
|
||||
payload = {"interval": {"start": 30, "end": 35}}
|
||||
response = self.client.patch(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/item/{fake_uuid}/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 404)
|
||||
|
||||
def test_patch_with_invalid_data_returns_400(self):
|
||||
"""Test PATCH with invalid data returns 400."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# First create an item
|
||||
create_payload = {"interval": {"start": 20, "end": 25}}
|
||||
create_response = self.client.post(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/temperature/",
|
||||
data=json.dumps(create_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
item_uuid = create_response.json()["uuid"]
|
||||
|
||||
# Try to update with invalid data
|
||||
invalid_payload = {"invalid_field": "value"}
|
||||
patch_response = self.client.patch(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/item/{item_uuid}/",
|
||||
data=json.dumps(invalid_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(patch_response.status_code, 400)
|
||||
|
||||
def test_patch_validation_errors_are_user_friendly(self):
|
||||
"""Test that PATCH validation errors are user-friendly and field-specific."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# First create an item
|
||||
create_payload = {"interval": {"start": 20, "end": 25}}
|
||||
create_response = self.client.post(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/temperature/",
|
||||
data=json.dumps(create_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
item_uuid = create_response.json()["uuid"]
|
||||
|
||||
# Update with invalid data - wrong type (string instead of number in interval)
|
||||
invalid_payload = {"interval": {"start": "not_a_number", "end": 25}}
|
||||
patch_response = self.client.patch(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/item/{item_uuid}/",
|
||||
data=json.dumps(invalid_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(patch_response.status_code, 400)
|
||||
data = patch_response.json()
|
||||
|
||||
# Parse the error response - Django Ninja wraps errors in 'detail'
|
||||
error_str = data.get("detail") or data.get("error")
|
||||
self.assertIsNotNone(error_str, "Response should contain error details")
|
||||
|
||||
# Parse the JSON error string
|
||||
error_data = json.loads(error_str)
|
||||
|
||||
# Check structure
|
||||
self.assertEqual(error_data.get("type"), "validation_error")
|
||||
self.assertIn("field_errors", error_data)
|
||||
self.assertIn("message", error_data)
|
||||
|
||||
# Ensure error messages are user-friendly (no Pydantic URLs or technical jargon)
|
||||
error_str = json.dumps(error_data)
|
||||
self.assertNotIn("pydantic", error_str.lower())
|
||||
self.assertNotIn("https://errors.pydantic.dev", error_str)
|
||||
self.assertNotIn("loc", error_str) # No technical field like 'loc'
|
||||
|
||||
# Check that error message is helpful
|
||||
self.assertIn("Please", error_data["message"]) # User-friendly language
|
||||
|
||||
def test_delete_additional_information(self):
|
||||
"""Test DELETE removes additional information."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# Create an item
|
||||
create_payload = {"interval": {"start": 20, "end": 25}}
|
||||
create_response = self.client.post(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/temperature/",
|
||||
data=json.dumps(create_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
item_uuid = create_response.json()["uuid"]
|
||||
|
||||
# Delete it
|
||||
delete_response = self.client.delete(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/item/{item_uuid}/"
|
||||
)
|
||||
|
||||
self.assertEqual(delete_response.status_code, 200)
|
||||
data = delete_response.json()
|
||||
self.assertEqual(data["status"], "deleted")
|
||||
|
||||
# Verify deletion
|
||||
list_response = self.client.get(f"/api/v1/scenario/{self.scenario.uuid}/information/")
|
||||
items = list_response.json()
|
||||
self.assertEqual(len(items), 0)
|
||||
|
||||
def test_delete_nonexistent_item_returns_404(self):
|
||||
"""Test DELETE on non-existent item returns 404."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
fake_uuid = str(uuid4())
|
||||
response = self.client.delete(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/item/{fake_uuid}/"
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 404)
|
||||
|
||||
def test_multiple_items_crud(self):
|
||||
"""Test creating, updating, and deleting multiple items."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# Create first item
|
||||
item1_payload = {"interval": {"start": 20, "end": 25}}
|
||||
response1 = self.client.post(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/temperature/",
|
||||
data=json.dumps(item1_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
item1_uuid = response1.json()["uuid"]
|
||||
|
||||
# Create second item (different type if available, or same type)
|
||||
item2_payload = {"interval": {"start": 30, "end": 35}}
|
||||
response2 = self.client.post(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/temperature/",
|
||||
data=json.dumps(item2_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
item2_uuid = response2.json()["uuid"]
|
||||
|
||||
# Verify both exist
|
||||
list_response = self.client.get(f"/api/v1/scenario/{self.scenario.uuid}/information/")
|
||||
items = list_response.json()
|
||||
self.assertEqual(len(items), 2)
|
||||
|
||||
# Update first item
|
||||
update_payload = {"interval": {"start": 15, "end": 20}}
|
||||
self.client.patch(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/item/{item1_uuid}/",
|
||||
data=json.dumps(update_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
# Delete second item
|
||||
self.client.delete(f"/api/v1/scenario/{self.scenario.uuid}/information/item/{item2_uuid}/")
|
||||
|
||||
# Verify final state: one item with updated data
|
||||
list_response = self.client.get(f"/api/v1/scenario/{self.scenario.uuid}/information/")
|
||||
items = list_response.json()
|
||||
self.assertEqual(len(items), 1)
|
||||
self.assertEqual(items[0]["uuid"], item1_uuid)
|
||||
self.assertEqual(items[0]["data"]["interval"]["start"], 15)
|
||||
|
||||
def test_list_info_denied_without_permission(self):
|
||||
"""User cannot list info for scenario in package they don't have access to"""
|
||||
self.client.force_login(self.user)
|
||||
response = self.client.get(f"/api/v1/scenario/{self.other_scenario.uuid}/information/")
|
||||
self.assertEqual(response.status_code, 403)
|
||||
|
||||
def test_add_info_denied_without_permission(self):
|
||||
"""User cannot add info to scenario in package they don't have access to"""
|
||||
self.client.force_login(self.user)
|
||||
payload = {"interval": {"start": 25, "end": 30}}
|
||||
response = self.client.post(
|
||||
f"/api/v1/scenario/{self.other_scenario.uuid}/information/temperature/",
|
||||
json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
self.assertEqual(response.status_code, 403)
|
||||
|
||||
def test_update_info_denied_without_permission(self):
|
||||
"""User cannot update info in scenario they don't have access to"""
|
||||
self.client.force_login(self.other_user)
|
||||
# First create an item as other_user
|
||||
create_payload = {"interval": {"start": 20, "end": 25}}
|
||||
create_response = self.client.post(
|
||||
f"/api/v1/scenario/{self.other_scenario.uuid}/information/temperature/",
|
||||
data=json.dumps(create_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
item_uuid = create_response.json()["uuid"]
|
||||
|
||||
# Try to update as user (who doesn't have access)
|
||||
self.client.force_login(self.user)
|
||||
update_payload = {"interval": {"start": 30, "end": 35}}
|
||||
response = self.client.patch(
|
||||
f"/api/v1/scenario/{self.other_scenario.uuid}/information/item/{item_uuid}/",
|
||||
data=json.dumps(update_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
self.assertEqual(response.status_code, 403)
|
||||
|
||||
def test_delete_info_denied_without_permission(self):
|
||||
"""User cannot delete info from scenario they don't have access to"""
|
||||
self.client.force_login(self.other_user)
|
||||
# First create an item as other_user
|
||||
create_payload = {"interval": {"start": 20, "end": 25}}
|
||||
create_response = self.client.post(
|
||||
f"/api/v1/scenario/{self.other_scenario.uuid}/information/temperature/",
|
||||
data=json.dumps(create_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
item_uuid = create_response.json()["uuid"]
|
||||
|
||||
# Try to delete as user (who doesn't have access)
|
||||
self.client.force_login(self.user)
|
||||
response = self.client.delete(
|
||||
f"/api/v1/scenario/{self.other_scenario.uuid}/information/item/{item_uuid}/"
|
||||
)
|
||||
self.assertEqual(response.status_code, 403)
|
||||
|
||||
def test_nonexistent_scenario_returns_404(self):
|
||||
"""Test operations on non-existent scenario return 404."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
fake_uuid = uuid4()
|
||||
response = self.client.get(f"/api/v1/scenario/{fake_uuid}/information/")
|
||||
|
||||
self.assertEqual(response.status_code, 404)
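For orientation, a sketch of the per-model entry shape that test_list_all_schemas asserts for the RJSF schema listing; the four keys are taken from the assertions, while the values shown are illustrative assumptions rather than the real 'temperature' schema:

# Hypothetical entry under /api/v1/information/schema/; only the key names
# ("schema", "uiSchema", "formData", "groups") are asserted by the tests above.
example_schema_entry = {
    "schema": {"type": "object", "properties": {"interval": {"type": "object"}}},
    "uiSchema": {},
    "formData": {},
    "groups": [],
}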
@ -1,477 +0,0 @@
from django.test import TestCase, tag

from epdb.logic import GroupManager, PackageManager, UserManager
from epdb.models import (
    Compound,
    GroupPackagePermission,
    Permission,
    UserPackagePermission,
)


@tag("api", "end2end")
class APIPermissionTestBase(TestCase):
    """
    Base class for API permission tests.

    Sets up common test data:
    - user1: Owner of packages
    - user2: User with various permissions
    - user3: User with no permissions
    - reviewed_package: Public package (reviewed=True)
    - unreviewed_package_owned: Unreviewed package owned by user1
    - unreviewed_package_read: Unreviewed package with READ permission for user2
    - unreviewed_package_write: Unreviewed package with WRITE permission for user2
    - unreviewed_package_all: Unreviewed package with ALL permission for user2
    - unreviewed_package_no_access: Unreviewed package with no permissions for user2/user3
    - group_package: Unreviewed package accessible via group permission
    - test_group: Group containing user2
    """

    @classmethod
    def setUpTestData(cls):
        # Create users
        cls.user1 = UserManager.create_user(
            "permission-user1",
            "permission-user1@envipath.com",
            "SuperSafe",
            set_setting=False,
            add_to_group=False,
            is_active=True,
        )
        cls.user2 = UserManager.create_user(
            "permission-user2",
            "permission-user2@envipath.com",
            "SuperSafe",
            set_setting=False,
            add_to_group=False,
            is_active=True,
        )
        cls.user3 = UserManager.create_user(
            "permission-user3",
            "permission-user3@envipath.com",
            "SuperSafe",
            set_setting=False,
            add_to_group=False,
            is_active=True,
        )

        # Delete default packages to ensure clean test data
        for user in [cls.user1, cls.user2, cls.user3]:
            default_pkg = user.default_package
            user.default_package = None
            user.save()
            if default_pkg:
                default_pkg.delete()

        # Create reviewed package (public)
        cls.reviewed_package = PackageManager.create_package(
            cls.user1, "Reviewed Package", "Public package"
        )
        cls.reviewed_package.reviewed = True
        cls.reviewed_package.save()

        # Create unreviewed packages with various permissions
        cls.unreviewed_package_owned = PackageManager.create_package(
            cls.user1, "User1 Owned Package", "Owned by user1"
        )

        cls.unreviewed_package_read = PackageManager.create_package(
            cls.user1, "User2 Read Package", "User2 has READ permission"
        )
        UserPackagePermission.objects.create(
            user=cls.user2, package=cls.unreviewed_package_read, permission=Permission.READ[0]
        )

        cls.unreviewed_package_write = PackageManager.create_package(
            cls.user1, "User2 Write Package", "User2 has WRITE permission"
        )
        UserPackagePermission.objects.create(
            user=cls.user2, package=cls.unreviewed_package_write, permission=Permission.WRITE[0]
        )

        cls.unreviewed_package_all = PackageManager.create_package(
            cls.user1, "User2 All Package", "User2 has ALL permission"
        )
        UserPackagePermission.objects.create(
            user=cls.user2, package=cls.unreviewed_package_all, permission=Permission.ALL[0]
        )

        cls.unreviewed_package_no_access = PackageManager.create_package(
            cls.user1, "No Access Package", "No permissions for user2/user3"
        )

        # Create group and group package
        cls.test_group = GroupManager.create_group(
            cls.user1, "Test Group", "Group for permission testing"
        )
        cls.test_group.user_member.add(cls.user2)
        cls.test_group.save()

        cls.group_package = PackageManager.create_package(
            cls.user1, "Group Package", "Accessible via group permission"
        )
        GroupPackagePermission.objects.create(
            group=cls.test_group, package=cls.group_package, permission=Permission.READ[0]
        )

        # Create test compounds in each package
        cls.reviewed_compound = Compound.create(
            cls.reviewed_package, "C", "Reviewed Compound", "Test compound"
        )
        cls.owned_compound = Compound.create(
            cls.unreviewed_package_owned, "CC", "Owned Compound", "Test compound"
        )
        cls.read_compound = Compound.create(
            cls.unreviewed_package_read, "CCC", "Read Compound", "Test compound"
        )
        cls.write_compound = Compound.create(
            cls.unreviewed_package_write, "CCCC", "Write Compound", "Test compound"
        )
        cls.all_compound = Compound.create(
            cls.unreviewed_package_all, "CCCCC", "All Compound", "Test compound"
        )
        cls.no_access_compound = Compound.create(
            cls.unreviewed_package_no_access, "CCCCCC", "No Access Compound", "Test compound"
        )
        cls.group_compound = Compound.create(
            cls.group_package, "CCCCCCC", "Group Compound", "Test compound"
        )


@tag("api", "end2end")
class PackageListPermissionTest(APIPermissionTestBase):
    """
    Test permissions for /api/v1/packages/ endpoint.

    Special case: This endpoint allows anonymous access (auth=None)
    """

    ENDPOINT = "/api/v1/packages/"

    def test_anonymous_user_sees_only_reviewed_packages(self):
        """Anonymous users should only see reviewed packages."""
        self.client.logout()
        response = self.client.get(self.ENDPOINT)

        self.assertEqual(response.status_code, 200)
        payload = response.json()

        # Should only see reviewed package
        self.assertEqual(payload["total_items"], 1)
        self.assertEqual(payload["items"][0]["uuid"], str(self.reviewed_package.uuid))
        self.assertEqual(payload["items"][0]["review_status"], "reviewed")

    def test_authenticated_user_sees_all_readable_packages(self):
        """Authenticated users see reviewed + packages they have access to."""
        self.client.force_login(self.user2)
        response = self.client.get(self.ENDPOINT)

        self.assertEqual(response.status_code, 200)
        payload = response.json()

        # user2 should see:
        # - reviewed_package (public)
        # - unreviewed_package_read (READ permission)
        # - unreviewed_package_write (WRITE permission)
        # - unreviewed_package_all (ALL permission)
        # - group_package (via group membership)
        # Total: 5 packages
        self.assertEqual(payload["total_items"], 5)

        visible_uuids = {item["uuid"] for item in payload["items"]}
        expected_uuids = {
            str(self.reviewed_package.uuid),
            str(self.unreviewed_package_read.uuid),
            str(self.unreviewed_package_write.uuid),
            str(self.unreviewed_package_all.uuid),
            str(self.group_package.uuid),
        }
        self.assertEqual(visible_uuids, expected_uuids)

    def test_owner_sees_all_owned_packages(self):
        """Package owner sees all packages they created."""
        self.client.force_login(self.user1)
        response = self.client.get(self.ENDPOINT)

        self.assertEqual(response.status_code, 200)
        payload = response.json()

        # user1 owns all packages
        # Total: 7 packages (all packages created in setUpTestData)
        self.assertEqual(payload["total_items"], 7)

    def test_filter_by_review_status_true(self):
        """Filter to show only reviewed packages."""
        self.client.force_login(self.user2)
        response = self.client.get(self.ENDPOINT, {"review_status": True})

        self.assertEqual(response.status_code, 200)
        payload = response.json()

        # Only reviewed_package
        self.assertEqual(payload["total_items"], 1)
        self.assertTrue(all(item["review_status"] == "reviewed" for item in payload["items"]))

    def test_filter_by_review_status_false(self):
        """Filter to show only unreviewed packages."""
        self.client.force_login(self.user2)
        response = self.client.get(self.ENDPOINT, {"review_status": False})

        self.assertEqual(response.status_code, 200)
        payload = response.json()

        # user2's accessible unreviewed packages: 4
        self.assertEqual(payload["total_items"], 4)
        self.assertTrue(all(item["review_status"] == "unreviewed" for item in payload["items"]))

    def test_anonymous_filter_unreviewed_returns_empty(self):
        """Anonymous users get no results when filtering for unreviewed."""
        self.client.logout()
        response = self.client.get(self.ENDPOINT, {"review_status": False})

        self.assertEqual(response.status_code, 200)
        payload = response.json()

        self.assertEqual(payload["total_items"], 0)


@tag("api", "end2end")
class GlobalCompoundListPermissionTest(APIPermissionTestBase):
    """
    Test permissions for /api/v1/compounds/ endpoint.

    This endpoint requires authentication.
    """

    ENDPOINT = "/api/v1/compounds/"

    def test_anonymous_user_cannot_access(self):
        """Anonymous users should get 401 Unauthorized."""
        self.client.logout()
        response = self.client.get(self.ENDPOINT)

        self.assertEqual(response.status_code, 401)

    def test_authenticated_user_sees_compounds_from_readable_packages(self):
        """Authenticated users see compounds from packages they can read."""
        self.client.force_login(self.user2)
        response = self.client.get(self.ENDPOINT)

        self.assertEqual(response.status_code, 200)
        payload = response.json()

        self.assertEqual(payload["total_items"], 5)

        visible_uuids = {item["uuid"] for item in payload["items"]}
        expected_uuids = {
            str(self.reviewed_compound.uuid),
            str(self.read_compound.uuid),
            str(self.write_compound.uuid),
            str(self.all_compound.uuid),
            str(self.group_compound.uuid),
        }
        self.assertEqual(visible_uuids, expected_uuids)

    def test_user_without_permission_cannot_see_compound(self):
        """User without permission to package cannot see its compounds."""
        self.client.force_login(self.user3)
        response = self.client.get(self.ENDPOINT)

        self.assertEqual(response.status_code, 200)
        payload = response.json()

        # user3 should only see compounds from reviewed_package
        self.assertEqual(payload["total_items"], 1)
        self.assertEqual(payload["items"][0]["uuid"], str(self.reviewed_compound.uuid))

    def test_owner_sees_all_compounds(self):
        """Package owner sees all compounds they created."""
        self.client.force_login(self.user1)
        response = self.client.get(self.ENDPOINT)

        self.assertEqual(response.status_code, 200)
        payload = response.json()

        # user1 owns all packages, so sees all compounds
        self.assertEqual(payload["total_items"], 7)


@tag("api", "end2end")
class PackageScopedCompoundListPermissionTest(APIPermissionTestBase):
    """
    Test permissions for /api/v1/package/{uuid}/compound/ endpoint.

    This endpoint requires authentication AND package access.
    """

    def test_anonymous_user_cannot_access_reviewed_package(self):
        """Anonymous users should get 401 even for reviewed packages."""
        self.client.logout()
        endpoint = f"/api/v1/package/{self.reviewed_package.uuid}/compound/"
        response = self.client.get(endpoint)

        self.assertEqual(response.status_code, 401)

    def test_authenticated_user_can_access_reviewed_package(self):
        """Authenticated users can access reviewed packages."""
        self.client.force_login(self.user3)
        endpoint = f"/api/v1/package/{self.reviewed_package.uuid}/compound/"
        response = self.client.get(endpoint)

        self.assertEqual(response.status_code, 200)
        payload = response.json()

        self.assertEqual(payload["total_items"], 1)
        self.assertEqual(payload["items"][0]["uuid"], str(self.reviewed_compound.uuid))

    def test_user_can_access_package_with_read_permission(self):
        """User with READ permission can access package-scoped endpoint."""
        self.client.force_login(self.user2)
        endpoint = f"/api/v1/package/{self.unreviewed_package_read.uuid}/compound/"
        response = self.client.get(endpoint)

        self.assertEqual(response.status_code, 200)
        payload = response.json()

        self.assertEqual(payload["total_items"], 1)
        self.assertEqual(payload["items"][0]["uuid"], str(self.read_compound.uuid))

    def test_user_can_access_package_with_write_permission(self):
        """User with WRITE permission can access package-scoped endpoint."""
        self.client.force_login(self.user2)
        endpoint = f"/api/v1/package/{self.unreviewed_package_write.uuid}/compound/"
        response = self.client.get(endpoint)

        self.assertEqual(response.status_code, 200)
        payload = response.json()

        self.assertEqual(payload["total_items"], 1)
        self.assertEqual(payload["items"][0]["uuid"], str(self.write_compound.uuid))

    def test_user_can_access_package_with_all_permission(self):
        """User with ALL permission can access package-scoped endpoint."""
        self.client.force_login(self.user2)
        endpoint = f"/api/v1/package/{self.unreviewed_package_all.uuid}/compound/"
        response = self.client.get(endpoint)

        self.assertEqual(response.status_code, 200)
        payload = response.json()

        self.assertEqual(payload["total_items"], 1)
        self.assertEqual(payload["items"][0]["uuid"], str(self.all_compound.uuid))

    def test_user_cannot_access_package_without_permission(self):
        """User without permission gets 403 Forbidden."""
        self.client.force_login(self.user2)
        endpoint = f"/api/v1/package/{self.unreviewed_package_no_access.uuid}/compound/"
        response = self.client.get(endpoint)

        self.assertEqual(response.status_code, 403)

    def test_nonexistent_package_returns_404(self):
        """Request for non-existent package returns 404."""
        self.client.force_login(self.user2)
        fake_uuid = "00000000-0000-0000-0000-000000000000"
        endpoint = f"/api/v1/package/{fake_uuid}/compound/"
        response = self.client.get(endpoint)

        self.assertEqual(response.status_code, 404)

    def test_owner_can_access_owned_package(self):
        """Package owner can access their package."""
        self.client.force_login(self.user1)
        endpoint = f"/api/v1/package/{self.unreviewed_package_owned.uuid}/compound/"
        response = self.client.get(endpoint)

        self.assertEqual(response.status_code, 200)
        payload = response.json()

        self.assertEqual(payload["total_items"], 1)
        self.assertEqual(payload["items"][0]["uuid"], str(self.owned_compound.uuid))

    def test_group_member_can_access_group_package(self):
        """Group member can access package via group permission."""
        self.client.force_login(self.user2)
        endpoint = f"/api/v1/package/{self.group_package.uuid}/compound/"
        response = self.client.get(endpoint)

        self.assertEqual(response.status_code, 200)
        payload = response.json()

        self.assertEqual(payload["total_items"], 1)
        self.assertEqual(payload["items"][0]["uuid"], str(self.group_compound.uuid))

    def test_non_group_member_cannot_access_group_package(self):
        """Non-group member cannot access package with only group permission."""
        self.client.force_login(self.user3)
        endpoint = f"/api/v1/package/{self.group_package.uuid}/compound/"
        response = self.client.get(endpoint)

        self.assertEqual(response.status_code, 403)


@tag("api", "end2end")
class MultiResourcePermissionTest(APIPermissionTestBase):
    """
    Test that permission system works consistently across all resource types.

    Tests a sample of other endpoints to ensure permission logic is consistent.
    """

    def test_rules_endpoint_respects_permissions(self):
        """Rules endpoint uses same permission logic."""
        from epdb.models import SimpleAmbitRule

        # Create rule in no-access package
        rule = SimpleAmbitRule.create(
            self.unreviewed_package_no_access, "Test Rule", "Test", "[C:1]>>[C:1]O"
        )

        self.client.force_login(self.user2)
        response = self.client.get("/api/v1/rules/")

        self.assertEqual(response.status_code, 200)
        payload = response.json()

        # user2 should not see the rule from no_access_package
        rule_uuids = [item["uuid"] for item in payload["items"]]
        self.assertNotIn(str(rule.uuid), rule_uuids)

    def test_reactions_endpoint_respects_permissions(self):
        """Reactions endpoint uses same permission logic."""
        from epdb.models import Reaction

        # Create reaction in no-access package
        reaction = Reaction.create(
            self.unreviewed_package_no_access, "Test Reaction", "Test", ["C"], ["CO"]
        )

        self.client.force_login(self.user2)
        response = self.client.get("/api/v1/reactions/")

        self.assertEqual(response.status_code, 200)
        payload = response.json()

        # user2 should not see the reaction from no_access_package
        reaction_uuids = [item["uuid"] for item in payload["items"]]
        self.assertNotIn(str(reaction.uuid), reaction_uuids)

    def test_pathways_endpoint_respects_permissions(self):
        """Pathways endpoint uses same permission logic."""
        from epdb.models import Pathway

        # Create pathway in no-access package
        pathway = Pathway.objects.create(
            package=self.unreviewed_package_no_access, name="Test Pathway", description="Test"
        )

        self.client.force_login(self.user2)
        response = self.client.get("/api/v1/pathways/")

        self.assertEqual(response.status_code, 200)
        payload = response.json()

        # user2 should not see the pathway from no_access_package
        pathway_uuids = [item["uuid"] for item in payload["items"]]
        self.assertNotIn(str(pathway.uuid), pathway_uuids)
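The visibility counts these permission tests encode can be summarized as follows; the numbers are taken directly from the assertions above, and the dict itself is just a reading aid, not part of the test suite:

# Expected number of visible packages (and compounds) per caller, as asserted above.
expected_visible_counts = {
    "anonymous": 1,  # reviewed_package only
    "user1": 7,      # owner of every package created in setUpTestData
    "user2": 5,      # reviewed + READ/WRITE/ALL packages + group package
    "user3": 1,      # reviewed_package only
}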
@ -1,477 +0,0 @@
from django.test import TestCase, tag

from epdb.logic import PackageManager, UserManager
from epdb.models import Compound, Reaction, Pathway, EPModel, SimpleAmbitRule, Scenario


class BaseTestAPIGetPaginated:
    """
    Mixin class for API pagination tests.

    Subclasses must inherit from both this class and TestCase, e.g.:
        class MyTest(BaseTestAPIGetPaginated, TestCase):
            ...

    Subclasses must define:
    - resource_name: Singular name (e.g., "compound")
    - resource_name_plural: Plural name (e.g., "compounds")
    - global_endpoint: Global listing endpoint (e.g., "/api/v1/compounds/")
    - package_endpoint_template: Template for package-scoped endpoint or None
    - total_reviewed: Number of reviewed items to create
    - total_unreviewed: Number of unreviewed items to create
    - create_reviewed_resource(cls, package, idx): Factory method
    - create_unreviewed_resource(cls, package, idx): Factory method
    """

    # Configuration to be overridden by subclasses
    resource_name = None
    resource_name_plural = None
    global_endpoint = None
    package_endpoint_template = None
    total_reviewed = 50
    total_unreviewed = 20
    default_page_size = 50
    max_page_size = 100

    @classmethod
    def setUpTestData(cls):
        # Create test user
        cls.user = UserManager.create_user(
            f"{cls.resource_name}-user",
            f"{cls.resource_name}-user@envipath.com",
            "SuperSafe",
            set_setting=False,
            add_to_group=False,
            is_active=True,
        )

        # Delete the auto-created default package to ensure clean test data
        default_pkg = cls.user.default_package
        cls.user.default_package = None
        cls.user.save()
        default_pkg.delete()

        # Create reviewed package
        cls.reviewed_package = PackageManager.create_package(
            cls.user, "Reviewed Package", f"Reviewed package for {cls.resource_name} tests"
        )
        cls.reviewed_package.reviewed = True
        cls.reviewed_package.save()

        # Create unreviewed package
        cls.unreviewed_package = PackageManager.create_package(
            cls.user, "Draft Package", f"Unreviewed package for {cls.resource_name} tests"
        )

        # Create reviewed resources
        for idx in range(cls.total_reviewed):
            cls.create_reviewed_resource(cls.reviewed_package, idx)

        # Create unreviewed resources
        for idx in range(cls.total_unreviewed):
            cls.create_unreviewed_resource(cls.unreviewed_package, idx)

        # Set up package-scoped endpoints if applicable
        if cls.package_endpoint_template:
            cls.reviewed_package_endpoint = cls.package_endpoint_template.format(
                uuid=cls.reviewed_package.uuid
            )
            cls.unreviewed_package_endpoint = cls.package_endpoint_template.format(
                uuid=cls.unreviewed_package.uuid
            )

    @classmethod
    def create_reviewed_resource(cls, package, idx):
        """
        Create a single reviewed resource.
        Must be implemented by subclass.

        Args:
            package: The package to create the resource in
            idx: Index of the resource (0-based)
        """
        raise NotImplementedError(f"{cls.__name__} must implement create_reviewed_resource()")

    @classmethod
    def create_unreviewed_resource(cls, package, idx):
        """
        Create a single unreviewed resource.
        Must be implemented by subclass.

        Args:
            package: The package to create the resource in
            idx: Index of the resource (0-based)
        """
        raise NotImplementedError(f"{cls.__name__} must implement create_unreviewed_resource()")

    def setUp(self):
        self.client.force_login(self.user)

    def test_requires_session_authentication(self):
        """Test that the global endpoint requires authentication."""
        self.client.logout()
        response = self.client.get(self.global_endpoint)
        self.assertEqual(response.status_code, 401)

    def test_global_listing_uses_default_page_size(self):
        """Test that the global endpoint uses default pagination settings."""
        response = self.client.get(self.global_endpoint, {"review_status": True})
        self.assertEqual(response.status_code, 200)

        payload = response.json()
        self.assertEqual(payload["page"], 1)
        self.assertEqual(payload["page_size"], self.default_page_size)
        self.assertEqual(payload["total_items"], self.total_reviewed)

        # Verify only reviewed items are returned
        self.assertTrue(all(item["review_status"] == "reviewed" for item in payload["items"]))

    def test_can_request_later_page(self):
        """Test that pagination works for later pages."""
        if self.total_reviewed <= self.default_page_size:
            self.skipTest(
                f"Not enough items to test pagination "
                f"({self.total_reviewed} <= {self.default_page_size})"
            )

        response = self.client.get(self.global_endpoint, {"page": 2, "review_status": True})
        self.assertEqual(response.status_code, 200)

        payload = response.json()
        self.assertEqual(payload["page"], 2)

        # Calculate expected items on page 2
        expected_items = min(self.default_page_size, self.total_reviewed - self.default_page_size)
        self.assertEqual(len(payload["items"]), expected_items)

        # Verify only reviewed items are returned
        self.assertTrue(all(item["review_status"] == "reviewed" for item in payload["items"]))

    def test_page_size_is_capped(self):
        """Test that page size is capped at the maximum."""
        if self.total_reviewed <= self.max_page_size:
            self.skipTest(
                f"Not enough items to test page size cap "
                f"({self.total_reviewed} <= {self.max_page_size})"
            )

        response = self.client.get(self.global_endpoint, {"page_size": 150})
        self.assertEqual(response.status_code, 200)

        payload = response.json()
        self.assertEqual(payload["page_size"], self.max_page_size)
        self.assertEqual(len(payload["items"]), self.max_page_size)

    def test_package_endpoint_for_reviewed_package(self):
        """Test the package-scoped endpoint for reviewed packages."""
        if not self.package_endpoint_template:
            self.skipTest("No package endpoint for this resource")

        response = self.client.get(self.reviewed_package_endpoint)
        self.assertEqual(response.status_code, 200)

        payload = response.json()
        self.assertEqual(payload["total_items"], self.total_reviewed)

        # Verify only reviewed items are returned
        self.assertTrue(all(item["review_status"] == "reviewed" for item in payload["items"]))

    def test_package_endpoint_for_unreviewed_package(self):
        """Test the package-scoped endpoint for unreviewed packages."""
        if not self.package_endpoint_template:
            self.skipTest("No package endpoint for this resource")

        response = self.client.get(self.unreviewed_package_endpoint)
        self.assertEqual(response.status_code, 200)

        payload = response.json()
        self.assertEqual(payload["total_items"], self.total_unreviewed)

        # Verify only unreviewed items are returned
        self.assertTrue(all(item["review_status"] == "unreviewed" for item in payload["items"]))


@tag("api", "end2end")
class PackagePaginationAPITest(TestCase):
    ENDPOINT = "/api/v1/packages/"

    @classmethod
    def setUpTestData(cls):
        cls.user = UserManager.create_user(
            "package-user",
            "package-user@envipath.com",
            "SuperSafe",
            set_setting=False,
            add_to_group=False,
            is_active=True,
        )

        # Delete the auto-created default package to ensure clean test data
        default_pkg = cls.user.default_package
        cls.user.default_package = None
        cls.user.save()
        default_pkg.delete()

        # Create reviewed packages
        cls.total_reviewed = 25
        for idx in range(cls.total_reviewed):
            package = PackageManager.create_package(
                cls.user, f"Reviewed Package {idx:03d}", "Reviewed package for tests"
            )
            package.reviewed = True
            package.save()

        # Create unreviewed packages
        cls.total_unreviewed = 15
        for idx in range(cls.total_unreviewed):
            PackageManager.create_package(
                cls.user, f"Draft Package {idx:03d}", "Unreviewed package for tests"
            )

    def setUp(self):
        self.client.force_login(self.user)

    def test_anonymous_can_access_reviewed_packages(self):
        self.client.logout()
        response = self.client.get(self.ENDPOINT)
        self.assertEqual(response.status_code, 200)

        payload = response.json()
        # Anonymous users can only see reviewed packages
        self.assertEqual(payload["total_items"], self.total_reviewed)
        self.assertTrue(all(item["review_status"] == "reviewed" for item in payload["items"]))

    def test_listing_uses_default_page_size(self):
        response = self.client.get(self.ENDPOINT)
        self.assertEqual(response.status_code, 200)

        payload = response.json()
        self.assertEqual(payload["page"], 1)
        self.assertEqual(payload["page_size"], 50)
        self.assertEqual(payload["total_items"], self.total_reviewed + self.total_unreviewed)

    def test_reviewed_filter_true(self):
        response = self.client.get(self.ENDPOINT, {"review_status": True})
        self.assertEqual(response.status_code, 200)

        payload = response.json()
        self.assertEqual(payload["total_items"], self.total_reviewed)
        self.assertTrue(all(item["review_status"] == "reviewed" for item in payload["items"]))

    def test_reviewed_filter_false(self):
        response = self.client.get(self.ENDPOINT, {"review_status": False})
        self.assertEqual(response.status_code, 200)

        payload = response.json()
        self.assertEqual(payload["total_items"], self.total_unreviewed)
        self.assertTrue(all(item["review_status"] == "unreviewed" for item in payload["items"]))

    def test_reviewed_filter_false_anonymous(self):
        self.client.logout()
        response = self.client.get(self.ENDPOINT, {"review_status": False})
        self.assertEqual(response.status_code, 200)

        payload = response.json()
        # Anonymous users cannot access unreviewed packages
        self.assertEqual(payload["total_items"], 0)


@tag("api", "end2end")
class CompoundPaginationAPITest(BaseTestAPIGetPaginated, TestCase):
    """Compound pagination tests using base class."""

    resource_name = "compound"
    resource_name_plural = "compounds"
    global_endpoint = "/api/v1/compounds/"
    package_endpoint_template = "/api/v1/package/{uuid}/compound/"
    total_reviewed = 125
    total_unreviewed = 35

    @classmethod
    def create_reviewed_resource(cls, package, idx):
        simple_smiles = ["C", "CC", "CCC", "CCCC", "CCCCC"]
        smiles = simple_smiles[idx % len(simple_smiles)] + ("O" * (idx // len(simple_smiles)))
        return Compound.create(
            package,
            smiles,
            f"Reviewed Compound {idx:03d}",
            "Compound for pagination tests",
        )

    @classmethod
    def create_unreviewed_resource(cls, package, idx):
        simple_smiles = ["C", "CC", "CCC", "CCCC", "CCCCC"]
        smiles = simple_smiles[idx % len(simple_smiles)] + ("N" * (idx // len(simple_smiles)))
        return Compound.create(
            package,
            smiles,
            f"Draft Compound {idx:03d}",
            "Compound for pagination tests",
        )


@tag("api", "end2end")
class RulePaginationAPITest(BaseTestAPIGetPaginated, TestCase):
    """Rule pagination tests using base class."""

    resource_name = "rule"
    resource_name_plural = "rules"
    global_endpoint = "/api/v1/rules/"
    package_endpoint_template = "/api/v1/package/{uuid}/rule/"
    total_reviewed = 125
    total_unreviewed = 35

    @classmethod
    def create_reviewed_resource(cls, package, idx):
        # Create unique SMIRKS by combining chain length and functional group variations
        # This ensures each idx gets a truly unique SMIRKS pattern
        carbon_chain = "C" * (idx + 1)  # C, CC, CCC, ... (grows with idx)
        smirks = f"[{carbon_chain}:1]>>[{carbon_chain}:1]O"
        return SimpleAmbitRule.create(
            package,
            f"Reviewed Rule {idx:03d}",
            f"Rule {idx} for pagination tests",
            smirks,
        )

    @classmethod
    def create_unreviewed_resource(cls, package, idx):
        # Create unique SMIRKS by varying the carbon chain length
        carbon_chain = "C" * (idx + 1)  # C, CC, CCC, ... (grows with idx)
        smirks = f"[{carbon_chain}:1]>>[{carbon_chain}:1]N"
        return SimpleAmbitRule.create(
            package,
            f"Draft Rule {idx:03d}",
            f"Rule {idx} for pagination tests",
            smirks,
        )


@tag("api", "end2end")
class ReactionPaginationAPITest(BaseTestAPIGetPaginated, TestCase):
    """Reaction pagination tests using base class."""

    resource_name = "reaction"
    resource_name_plural = "reactions"
    global_endpoint = "/api/v1/reactions/"
    package_endpoint_template = "/api/v1/package/{uuid}/reaction/"
    total_reviewed = 125
    total_unreviewed = 35

    @classmethod
    def create_reviewed_resource(cls, package, idx):
        # Generate unique SMILES with growing chain lengths to avoid duplicates
        # Each idx gets a unique chain length
        educt_smiles = "C" * (idx + 1)  # C, CC, CCC, ... (grows with idx)
        product_smiles = educt_smiles + "O"
        return Reaction.create(
            package=package,
            name=f"Reviewed Reaction {idx:03d}",
            description="Reaction for pagination tests",
            educts=[educt_smiles],
            products=[product_smiles],
        )

    @classmethod
    def create_unreviewed_resource(cls, package, idx):
        # Generate unique SMILES with growing chain lengths to avoid duplicates
        # Each idx gets a unique chain length
        educt_smiles = "C" * (idx + 1)  # C, CC, CCC, ... (grows with idx)
        product_smiles = educt_smiles + "N"
        return Reaction.create(
            package=package,
            name=f"Draft Reaction {idx:03d}",
            description="Reaction for pagination tests",
            educts=[educt_smiles],
            products=[product_smiles],
        )


@tag("api", "end2end")
class PathwayPaginationAPITest(BaseTestAPIGetPaginated, TestCase):
    """Pathway pagination tests using base class."""

    resource_name = "pathway"
    resource_name_plural = "pathways"
    global_endpoint = "/api/v1/pathways/"
    package_endpoint_template = "/api/v1/package/{uuid}/pathway/"
    total_reviewed = 125
    total_unreviewed = 35

    @classmethod
    def create_reviewed_resource(cls, package, idx):
        return Pathway.objects.create(
            package=package,
            name=f"Reviewed Pathway {idx:03d}",
            description="Pathway for pagination tests",
        )

    @classmethod
    def create_unreviewed_resource(cls, package, idx):
        return Pathway.objects.create(
            package=package,
            name=f"Draft Pathway {idx:03d}",
            description="Pathway for pagination tests",
        )


@tag("api", "end2end")
class ModelPaginationAPITest(BaseTestAPIGetPaginated, TestCase):
    """Model pagination tests using base class."""

    resource_name = "model"
    resource_name_plural = "models"
    global_endpoint = "/api/v1/models/"
    package_endpoint_template = "/api/v1/package/{uuid}/model/"
    total_reviewed = 125
    total_unreviewed = 35

    @classmethod
    def create_reviewed_resource(cls, package, idx):
        return EPModel.objects.create(
            package=package,
            name=f"Reviewed Model {idx:03d}",
            description="Model for pagination tests",
        )

    @classmethod
    def create_unreviewed_resource(cls, package, idx):
        return EPModel.objects.create(
            package=package,
            name=f"Draft Model {idx:03d}",
            description="Model for pagination tests",
        )


@tag("api", "end2end")
class ScenarioPaginationAPITest(BaseTestAPIGetPaginated, TestCase):
    """Scenario pagination tests using base class."""

    resource_name = "scenario"
    resource_name_plural = "scenarios"
    global_endpoint = "/api/v1/scenarios/"
    package_endpoint_template = "/api/v1/package/{uuid}/scenario/"
    total_reviewed = 125
    total_unreviewed = 35

    @classmethod
    def create_reviewed_resource(cls, package, idx):
        return Scenario.create(
            package,
            f"Reviewed Scenario {idx:03d}",
            "Scenario for pagination tests",
            "2025-01-01",
            "lab",
            [],
        )

    @classmethod
    def create_unreviewed_resource(cls, package, idx):
        return Scenario.create(
            package,
            f"Draft Scenario {idx:03d}",
            "Scenario for pagination tests",
            "2025-01-01",
            "field",
            [],
        )
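As a worked example of the pagination arithmetic these mixin tests rely on, using the subclass values above (total_reviewed=125, default_page_size=50, max_page_size=100); the min() mirrors the expected_items calculation in test_can_request_later_page:

# Page math for 125 reviewed items with a default page size of 50 and a cap of 100.
total_reviewed, default_page_size, max_page_size = 125, 50, 100
page2_items = min(default_page_size, total_reviewed - default_page_size)      # 50
page3_items = min(default_page_size, total_reviewed - 2 * default_page_size)  # 25
capped_items = min(max_page_size, total_reviewed)  # 100: a requested page_size of 150 is capped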
@ -1,301 +0,0 @@
"""
|
||||
Tests for Scenario Creation Endpoint Error Handling.
|
||||
|
||||
Tests comprehensive error handling for POST /api/v1/package/{uuid}/scenario/
|
||||
including package not found, permission denied, validation errors, and database errors.
|
||||
"""
|
||||
|
||||
from django.test import TestCase, tag
|
||||
import json
|
||||
from uuid import uuid4
|
||||
|
||||
from epdb.logic import PackageManager, UserManager
|
||||
from epdb.models import Scenario
|
||||
|
||||
|
||||
@tag("api", "scenario_creation")
|
||||
class ScenarioCreationAPITests(TestCase):
|
||||
"""Test scenario creation endpoint error handling."""
|
||||
|
||||
@classmethod
|
||||
def setUpTestData(cls):
|
||||
"""Set up test data: users and packages."""
|
||||
cls.user = UserManager.create_user(
|
||||
"scenario-test-user",
|
||||
"scenario-test@envipath.com",
|
||||
"SuperSafe",
|
||||
set_setting=False,
|
||||
add_to_group=False,
|
||||
is_active=True,
|
||||
)
|
||||
cls.other_user = UserManager.create_user(
|
||||
"other-user",
|
||||
"other@envipath.com",
|
||||
"SuperSafe",
|
||||
set_setting=False,
|
||||
add_to_group=False,
|
||||
is_active=True,
|
||||
)
|
||||
cls.package = PackageManager.create_package(
|
||||
cls.user, "Test Package", "Test package for scenario creation"
|
||||
)
|
||||
|
||||
def test_create_scenario_package_not_found(self):
|
||||
"""Test that non-existent package UUID returns 404."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
fake_uuid = uuid4()
|
||||
payload = {
|
||||
"name": "Test Scenario",
|
||||
"description": "Test description",
|
||||
"scenario_date": "2024-01-01",
|
||||
"scenario_type": "biodegradation",
|
||||
"additional_information": [],
|
||||
}
|
||||
|
||||
response = self.client.post(
|
||||
f"/api/v1/package/{fake_uuid}/scenario/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 404)
|
||||
self.assertIn(f"Package with UUID {fake_uuid} not found", response.json()["detail"])
|
||||
|
||||
def test_create_scenario_insufficient_permissions(self):
|
||||
"""Test that unauthorized access returns 403."""
|
||||
self.client.force_login(self.other_user)
|
||||
|
||||
payload = {
|
||||
"name": "Test Scenario",
|
||||
"description": "Test description",
|
||||
"scenario_date": "2024-01-01",
|
||||
"scenario_type": "biodegradation",
|
||||
"additional_information": [],
|
||||
}
|
||||
|
||||
response = self.client.post(
|
||||
f"/api/v1/package/{self.package.uuid}/scenario/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 403)
|
||||
self.assertIn("permission", response.json()["detail"].lower())
|
||||
|
||||
def test_create_scenario_invalid_ai_type(self):
|
||||
"""Test that unknown additional information type returns 400."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
payload = {
|
||||
"name": "Test Scenario",
|
||||
"description": "Test description",
|
||||
"scenario_date": "2024-01-01",
|
||||
"scenario_type": "biodegradation",
|
||||
"additional_information": [
|
||||
{"type": "invalid_type_that_does_not_exist", "data": {"some_field": "some_value"}}
|
||||
],
|
||||
}
|
||||
|
||||
response = self.client.post(
|
||||
f"/api/v1/package/{self.package.uuid}/scenario/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 400)
|
||||
response_data = response.json()
|
||||
self.assertIn("Validation errors", response_data["detail"])
|
||||
|
||||
def test_create_scenario_validation_error(self):
|
||||
"""Test that invalid additional information data returns 400."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# Use malformed data structure for an actual AI type
|
||||
payload = {
|
||||
"name": "Test Scenario",
|
||||
"description": "Test description",
|
||||
"scenario_date": "2024-01-01",
|
||||
"scenario_type": "biodegradation",
|
||||
"additional_information": [
|
||||
{
|
||||
"type": "invalid_type_name",
|
||||
"data": None, # This should cause a validation error
|
||||
}
|
||||
],
|
||||
}
|
||||
|
||||
response = self.client.post(
|
||||
f"/api/v1/package/{self.package.uuid}/scenario/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
# Should return 422 for validation errors
|
||||
self.assertEqual(response.status_code, 422)
|
||||
|
||||
def test_create_scenario_success(self):
|
||||
"""Test that valid scenario creation returns 200."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
payload = {
|
||||
"name": "Test Scenario",
|
||||
"description": "Test description",
|
||||
"scenario_date": "2024-01-01",
|
||||
"scenario_type": "biodegradation",
|
||||
"additional_information": [],
|
||||
}
|
||||
|
||||
response = self.client.post(
|
||||
f"/api/v1/package/{self.package.uuid}/scenario/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 200)
|
||||
data = response.json()
|
||||
self.assertEqual(data["name"], "Test Scenario")
|
||||
self.assertEqual(data["description"], "Test description")
|
||||
|
||||
# Verify scenario was actually created
|
||||
scenario = Scenario.objects.get(name="Test Scenario")
|
||||
self.assertEqual(scenario.package, self.package)
|
||||
self.assertEqual(scenario.scenario_type, "biodegradation")
|
||||
|
||||
def test_create_scenario_auto_name(self):
|
||||
"""Test that empty name triggers auto-generation."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
payload = {
|
||||
"name": "", # Empty name should be auto-generated
|
||||
"description": "Test description",
|
||||
"scenario_date": "2024-01-01",
|
||||
"scenario_type": "biodegradation",
|
||||
"additional_information": [],
|
||||
}
|
||||
|
||||
response = self.client.post(
|
||||
f"/api/v1/package/{self.package.uuid}/scenario/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 200)
|
||||
data = response.json()
|
||||
# Auto-generated name should follow pattern "Scenario N"
|
||||
self.assertTrue(data["name"].startswith("Scenario "))
|
||||
|
||||
def test_create_scenario_xss_protection(self):
|
||||
"""Test that XSS attempts are sanitized."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
payload = {
|
||||
"name": "<script>alert('xss')</script>Clean Name",
|
||||
"description": "<img src=x onerror=alert('xss')>Description",
|
||||
"scenario_date": "2024-01-01",
|
||||
"scenario_type": "biodegradation",
|
||||
"additional_information": [],
|
||||
}
|
||||
|
||||
response = self.client.post(
|
||||
f"/api/v1/package/{self.package.uuid}/scenario/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 200)
|
||||
data = response.json()
|
||||
# XSS should be cleaned out
|
||||
self.assertNotIn("<script>", data["name"])
|
||||
self.assertNotIn("onerror", data["description"])
|
||||
|
||||
def test_create_scenario_missing_required_field(self):
|
||||
"""Test that missing required fields returns validation error."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# Missing 'name' field entirely
|
||||
payload = {
|
||||
"description": "Test description",
|
||||
"scenario_date": "2024-01-01",
|
||||
"scenario_type": "biodegradation",
|
||||
"additional_information": [],
|
||||
}
|
||||
|
||||
response = self.client.post(
|
||||
f"/api/v1/package/{self.package.uuid}/scenario/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
# Should return 422 for schema validation errors
|
||||
self.assertEqual(response.status_code, 422)
|
||||
|
||||
def test_create_scenario_type_error_in_ai(self):
|
||||
"""Test that TypeError in AI instantiation returns 400."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
payload = {
|
||||
"name": "Test Scenario",
|
||||
"description": "Test description",
|
||||
"scenario_date": "2024-01-01",
|
||||
"scenario_type": "biodegradation",
|
||||
"additional_information": [
|
||||
{
|
||||
"type": "invalid_type_name",
|
||||
"data": "string instead of dict", # Wrong type
|
||||
}
|
||||
],
|
||||
}
|
||||
|
||||
response = self.client.post(
|
||||
f"/api/v1/package/{self.package.uuid}/scenario/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
# Should return 422 for validation errors
|
||||
self.assertEqual(response.status_code, 422)
|
||||
|
||||
def test_create_scenario_default_values(self):
|
||||
"""Test that default values are applied correctly."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# Minimal payload with only name
|
||||
payload = {"name": "Minimal Scenario"}
|
||||
|
||||
response = self.client.post(
|
||||
f"/api/v1/package/{self.package.uuid}/scenario/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 200)
|
||||
data = response.json()
|
||||
self.assertEqual(data["name"], "Minimal Scenario")
|
||||
# Check defaults are applied
|
||||
scenario = Scenario.objects.get(name="Minimal Scenario")
|
||||
# Default description from model is "no description"
|
||||
self.assertIn(scenario.description.lower(), ["", "no description"])
|
||||
|
||||
def test_create_scenario_unicode_characters(self):
|
||||
"""Test that unicode characters are handled properly."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
payload = {
|
||||
"name": "Test Scenario 测试 🧪",
|
||||
"description": "Description with émojis and spëcial çhars",
|
||||
"scenario_date": "2024-01-01",
|
||||
"scenario_type": "biodegradation",
|
||||
"additional_information": [],
|
||||
}
|
||||
|
||||
response = self.client.post(
|
||||
f"/api/v1/package/{self.package.uuid}/scenario/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 200)
|
||||
data = response.json()
|
||||
self.assertIn("测试", data["name"])
|
||||
self.assertIn("émojis", data["description"])
|
||||
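Since the class is tagged, this suite can be run in isolation with Django's tag filter, e.g. `python manage.py test --tag scenario_creation`.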
@ -1,114 +0,0 @@
"""
Property-based tests for schema generation.

Tests that verify schema generation works correctly for all models,
regardless of their structure.
"""

import pytest
from typing import Type
from pydantic import BaseModel

from envipy_additional_information import registry, EnviPyModel
from epapi.utils.schema_transformers import build_rjsf_output


class TestSchemaGeneration:
    """Test that all models can generate valid RJSF schemas."""

    @pytest.mark.parametrize("model_name,model_cls", list(registry.list_models().items()))
    def test_all_models_generate_rjsf(self, model_name: str, model_cls: Type[BaseModel]):
        """Every model in the registry should generate valid RJSF format."""
        # Skip non-EnviPyModel classes (parsers, etc.)
        if not issubclass(model_cls, EnviPyModel):
            pytest.skip(f"{model_name} is not an EnviPyModel")

        # Should not raise an exception
        result = build_rjsf_output(model_cls)

        # Verify structure
        assert isinstance(result, dict), f"{model_name}: Result should be a dict"
        assert "schema" in result, f"{model_name}: Missing 'schema' key"
        assert "uiSchema" in result, f"{model_name}: Missing 'uiSchema' key"
        assert "formData" in result, f"{model_name}: Missing 'formData' key"
        assert "groups" in result, f"{model_name}: Missing 'groups' key"

        # Verify types
        assert isinstance(result["schema"], dict), f"{model_name}: schema should be dict"
        assert isinstance(result["uiSchema"], dict), f"{model_name}: uiSchema should be dict"
        assert isinstance(result["formData"], dict), f"{model_name}: formData should be dict"
        assert isinstance(result["groups"], list), f"{model_name}: groups should be list"

        # Verify the schema has properties
        assert "properties" in result["schema"], f"{model_name}: schema should have 'properties'"
        assert isinstance(result["schema"]["properties"], dict), (
            f"{model_name}: properties should be dict"
        )

    @pytest.mark.parametrize("model_name,model_cls", list(registry.list_models().items()))
    def test_ui_schema_matches_schema_fields(self, model_name: str, model_cls: Type[BaseModel]):
        """uiSchema keys should match schema properties (or be nested for intervals)."""
        if not issubclass(model_cls, EnviPyModel):
            pytest.skip(f"{model_name} is not an EnviPyModel")

        result = build_rjsf_output(model_cls)
        schema_props = set(result["schema"]["properties"].keys())
        ui_schema_keys = set(result["uiSchema"].keys())

        # uiSchema should have entries for all top-level properties
        # (intervals may have nested start/end, but the main field should be present)
        assert ui_schema_keys.issubset(schema_props), (
            f"{model_name}: uiSchema has keys not in schema: {ui_schema_keys - schema_props}"
        )

    @pytest.mark.parametrize("model_name,model_cls", list(registry.list_models().items()))
    def test_groups_is_list_of_strings(self, model_name: str, model_cls: Type[BaseModel]):
        """Groups should be a list of strings."""
        if not issubclass(model_cls, EnviPyModel):
            pytest.skip(f"{model_name} is not an EnviPyModel")

        result = build_rjsf_output(model_cls)
        groups = result["groups"]

        assert isinstance(groups, list), f"{model_name}: groups should be list"
        assert all(isinstance(g, str) for g in groups), (
            f"{model_name}: all groups should be strings, got {groups}"
        )
        assert len(groups) > 0, f"{model_name}: should have at least one group"

    @pytest.mark.parametrize("model_name,model_cls", list(registry.list_models().items()))
    def test_form_data_matches_schema(self, model_name: str, model_cls: Type[BaseModel]):
        """formData keys should match schema properties."""
        if not issubclass(model_cls, EnviPyModel):
            pytest.skip(f"{model_name} is not an EnviPyModel")

        result = build_rjsf_output(model_cls)
        schema_props = set(result["schema"]["properties"].keys())
        form_data_keys = set(result["formData"].keys())

        # formData should only contain keys that are in the schema
        assert form_data_keys.issubset(schema_props), (
            f"{model_name}: formData has keys not in schema: {form_data_keys - schema_props}"
        )


class TestWidgetTypes:
    """Test that widget types are valid."""

    @pytest.mark.parametrize("model_name,model_cls", list(registry.list_models().items()))
    def test_widget_types_are_valid(self, model_name: str, model_cls: Type[BaseModel]):
        """All widget types in uiSchema should be valid WidgetType values."""
        from envipy_additional_information.ui_config import WidgetType

        if not issubclass(model_cls, EnviPyModel):
            pytest.skip(f"{model_name} is not an EnviPyModel")

        result = build_rjsf_output(model_cls)
        valid_widgets = {wt.value for wt in WidgetType}

        for field_name, ui_config in result["uiSchema"].items():
            widget = ui_config.get("ui:widget")
            if widget:
                assert widget in valid_widgets, (
                    f"{model_name}.{field_name}: Invalid widget '{widget}'. Valid: {valid_widgets}"
                )
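These checks are parametrized over every model in the registry; a single check can be selected with pytest's keyword filter, e.g. `pytest -k test_all_models_generate_rjsf`.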
@ -1,94 +0,0 @@
from datetime import timedelta

from django.test import TestCase, tag
from django.utils import timezone

from epdb.logic import PackageManager, UserManager
from epdb.models import APIToken


@tag("api", "auth")
class BearerTokenAuthTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        cls.user = UserManager.create_user(
            "token-user",
            "token-user@envipath.com",
            "SuperSafe",
            set_setting=False,
            add_to_group=False,
            is_active=True,
        )

        default_pkg = cls.user.default_package
        cls.user.default_package = None
        cls.user.save()
        if default_pkg:
            default_pkg.delete()

        cls.unreviewed_package = PackageManager.create_package(
            cls.user, "Token Auth Package", "Package for token auth tests"
        )

    def _auth_header(self, raw_token):
        return {"HTTP_AUTHORIZATION": f"Bearer {raw_token}"}

    def test_valid_token_allows_access(self):
        _, raw_token = APIToken.create_token(self.user, name="Valid Token", expires_days=1)

        response = self.client.get("/api/v1/compounds/", **self._auth_header(raw_token))

        self.assertEqual(response.status_code, 200)

    def test_expired_token_rejected(self):
        token, raw_token = APIToken.create_token(self.user, name="Expired Token", expires_days=1)
        token.expires_at = timezone.now() - timedelta(days=1)
        token.save(update_fields=["expires_at"])

        response = self.client.get("/api/v1/compounds/", **self._auth_header(raw_token))

        self.assertEqual(response.status_code, 401)

    def test_inactive_token_rejected(self):
        token, raw_token = APIToken.create_token(self.user, name="Inactive Token", expires_days=1)
        token.is_active = False
        token.save(update_fields=["is_active"])

        response = self.client.get("/api/v1/compounds/", **self._auth_header(raw_token))

        self.assertEqual(response.status_code, 401)

    def test_invalid_token_rejected(self):
        response = self.client.get("/api/v1/compounds/", HTTP_AUTHORIZATION="Bearer invalid-token")

        self.assertEqual(response.status_code, 401)

    def test_no_token_rejected(self):
        self.client.logout()
        response = self.client.get("/api/v1/compounds/")

        self.assertEqual(response.status_code, 401)

    def test_bearer_populates_request_user_for_packages(self):
        response = self.client.get("/api/v1/packages/")
        self.assertEqual(response.status_code, 200)
        payload = response.json()
        uuids = {item["uuid"] for item in payload["items"]}
        self.assertNotIn(str(self.unreviewed_package.uuid), uuids)

        _, raw_token = APIToken.create_token(self.user, name="Package Token", expires_days=1)
        response = self.client.get("/api/v1/packages/", **self._auth_header(raw_token))

        self.assertEqual(response.status_code, 200)
        payload = response.json()
        uuids = {item["uuid"] for item in payload["items"]}
        self.assertIn(str(self.unreviewed_package.uuid), uuids)

    def test_session_auth_still_works_without_bearer(self):
        self.client.force_login(self.user)
        response = self.client.get("/api/v1/packages/")

        self.assertEqual(response.status_code, 200)
        payload = response.json()
        uuids = {item["uuid"] for item in payload["items"]}
        self.assertIn(str(self.unreviewed_package.uuid), uuids)
@ -1,181 +0,0 @@
"""
Schema transformation utilities for converting Pydantic models to RJSF format.

This module provides functions to extract UI configuration from Pydantic models
and transform them into React JSON Schema Form (RJSF) compatible format.
"""

from typing import Type, Optional, Any

import jsonref
from pydantic import BaseModel

from envipy_additional_information.ui_config import UIConfig
from envipy_additional_information import registry


def extract_groups(model_cls: Type[BaseModel]) -> list[str]:
    """
    Extract groups from registry-stored group information.

    Args:
        model_cls: The model class

    Returns:
        List of group names the model belongs to
    """
    return registry.get_groups(model_cls)


def extract_ui_metadata(model_cls: Type[BaseModel]) -> dict[str, Any]:
    """
    Extract model-level UI metadata from the UI class.

    Returns metadata attributes that are NOT UIConfig instances.
    Common metadata includes: unit, description, title.
    """
    metadata: dict[str, Any] = {}

    if not hasattr(model_cls, "UI"):
        return metadata

    ui_class = getattr(model_cls, "UI")

    # Iterate over all attributes in the UI class
    for attr_name in dir(ui_class):
        # Skip private attributes
        if attr_name.startswith("_"):
            continue

        # Get the attribute value
        try:
            attr_value = getattr(ui_class, attr_name)
        except AttributeError:
            continue

        # Skip callables but keep types/classes
        if callable(attr_value) and not isinstance(attr_value, type):
            continue

        # Skip UIConfig instances (these are field-level configs, not metadata);
        # this covers both UIConfig and IntervalConfig
        if isinstance(attr_value, UIConfig):
            continue

        metadata[attr_name] = attr_value

    return metadata


def extract_ui_config_from_model(model_cls: Type[BaseModel]) -> dict[str, Any]:
    """
    Extract UI configuration from the model's UI class.

    Returns a dictionary mapping field names to their UI schema configurations.
    Trusts the config classes to handle their own transformation logic.
    """
    ui_configs: dict[str, Any] = {}

    if not hasattr(model_cls, "UI"):
        return ui_configs

    ui_class = getattr(model_cls, "UI")
    schema = model_cls.model_json_schema()
    field_names = schema.get("properties", {}).keys()

    # Extract the config for each field
    for field_name in field_names:
        # Skip if no UI config exists for this field (the field may be hidden from the UI)
        if not hasattr(ui_class, field_name):
            continue

        ui_config = getattr(ui_class, field_name)

        if isinstance(ui_config, UIConfig):
            ui_configs[field_name] = ui_config.to_ui_schema_field()

    return ui_configs


def build_ui_schema(model_cls: Type[BaseModel]) -> dict:
    """Generate RJSF uiSchema from the model's UI class."""
    # Field-level configs already map field names to uiSchema entries.
    return dict(extract_ui_config_from_model(model_cls))


def build_schema(model_cls: Type[BaseModel]) -> dict[str, Any]:
    """
    Build JSON schema from a Pydantic model, applying UI metadata.

    Dereferences all $ref pointers to produce a fully inlined schema.
    This ensures the frontend receives schemas with enum values and nested
    properties fully resolved, without needing client-side ref resolution.

    Extracts model-level metadata from the UI class (title, unit, etc.) and applies
    it to the generated schema, so UI metadata stays the single source of truth.
    """
    schema = model_cls.model_json_schema()

    # Dereference $ref pointers (inlines $defs) using jsonref.
    # FIXME: this is a workaround for the Alpine schema-form.js renderer;
    # drop it once we migrate to a client-side framework with ref support.
    schema = jsonref.replace_refs(schema, proxies=False)

    # Remove the $defs section as all refs are now inlined
    if "$defs" in schema:
        del schema["$defs"]

    # Extract and apply UI metadata (title, unit, description, etc.)
    ui_metadata = extract_ui_metadata(model_cls)

    # Apply all metadata consistently as custom properties with an x- prefix.
    # This keeps things uniform and avoids conflicts with standard JSON Schema properties.
    for key, value in ui_metadata.items():
        if value is not None:
            schema[f"x-{key}"] = value

    # Set the standard title property from UI metadata for JSON Schema compliance
    if "title" in ui_metadata:
        schema["title"] = ui_metadata["title"]
    elif "label" in ui_metadata:
        schema["title"] = ui_metadata["label"]

    return schema


def build_rjsf_output(model_cls: Type[BaseModel], initial_data: Optional[dict] = None) -> dict:
    """
    Main function that returns the complete RJSF format.

    Trusts the config classes to handle their own transformation logic.
    No special-case handling - if a config knows how to transform itself, it will.

    Returns:
        dict with keys: schema, uiSchema, formData, groups
    """
    # Build the schema with UI metadata applied
    schema = build_schema(model_cls)

    # Build the UI schema - config classes handle their own transformation
    ui_schema = build_ui_schema(model_cls)

    # Extract groups from marker interfaces
    groups = extract_groups(model_cls)

    # Use the provided initial_data or an empty dict
    form_data = initial_data if initial_data is not None else {}

    return {
        "schema": schema,
        "uiSchema": ui_schema,
        "formData": form_data,
        "groups": groups,
    }
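To see what the transformer produces, a minimal sketch; the Halflife model below is hypothetical (not registered), and the sketch assumes registry.get_groups returns an empty list for a model without group markers:

from pydantic import BaseModel


class Halflife(BaseModel):  # hypothetical model, not part of the registry
    value: float
    unit: str = "days"

    class UI:  # model-level metadata picked up by extract_ui_metadata
        title = "Half-life"
        unit = "days"


rjsf = build_rjsf_output(Halflife)
# rjsf["schema"]["properties"] contains "value" and "unit";
# rjsf["schema"]["title"] == "Half-life" and rjsf["schema"]["x-unit"] == "days";
# rjsf["uiSchema"] == {} since the UI class declares no UIConfig fields.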
@ -1,82 +0,0 @@
"""Shared utilities for handling Pydantic validation errors."""

import json
from typing import NoReturn

from pydantic import ValidationError
from pydantic_core import ErrorDetails
from ninja.errors import HttpError


def format_validation_error(error: ErrorDetails) -> str:
    """Format a Pydantic validation error into a user-friendly message.

    Args:
        error: A Pydantic error details dictionary containing 'msg', 'type', 'ctx', etc.

    Returns:
        A user-friendly error message string.
    """
    msg = error.get("msg") or "Invalid value"
    error_type = error.get("type") or ""

    # Handle common validation types with friendly messages
    if error_type == "enum":
        ctx = error.get("ctx", {})
        expected = ctx.get("expected", "") if ctx else ""
        return f"Please select a valid option{': ' + expected if expected else ''}"
    elif error_type == "literal_error":
        # Literal errors (like Literal["active", "inactive"])
        return msg.replace("Input should be ", "Please enter ")
    elif error_type == "missing":
        return "This field is required"
    elif error_type == "string_type":
        return "Please enter a valid string"
    elif error_type in ("int_type", "int_parsing"):
        return "Please enter a valid integer"
    elif error_type in ("float_type", "float_parsing"):
        return "Please enter a valid number"
    elif error_type == "value_error":
        # Strip the "Value error, " prefix from custom validator messages
        return msg.replace("Value error, ", "")
    else:
        # Default: use the message from Pydantic but clean it up
        return msg.replace("Input should be ", "Please enter ").replace("Value error, ", "")


def handle_validation_error(e: ValidationError) -> NoReturn:
    """Convert a Pydantic ValidationError into a structured HttpError.

    This function transforms Pydantic validation errors into a JSON structure
    that the frontend expects for displaying field-level errors.

    Args:
        e: The Pydantic ValidationError to handle.

    Raises:
        HttpError: Always raises a 400 error with structured JSON containing
            type, field_errors, and message fields.
    """
    # Transform Pydantic validation errors into a user-friendly format
    field_errors: dict[str, list[str]] = {}
    for error in e.errors():
        # Get the field name from the location tuple
        loc = error.get("loc", ())
        field = str(loc[-1]) if loc else "root"

        # Format the error message
        friendly_msg = format_validation_error(error)

        field_errors.setdefault(field, []).append(friendly_msg)

    # Return a structured error for frontend parsing
    error_response = {
        "type": "validation_error",
        "field_errors": field_errors,
        "message": "Please correct the errors below",
    }
    raise HttpError(400, json.dumps(error_response))
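A typical call site validates inside a try block and delegates to the handler, which always raises; a sketch (SomePayloadSchema is a stand-in for any Pydantic model):

from pydantic import BaseModel, ValidationError


class SomePayloadSchema(BaseModel):  # placeholder model for the sketch
    name: str
    count: int


def parse_payload(raw: dict) -> SomePayloadSchema:
    try:
        return SomePayloadSchema(**raw)
    except ValidationError as e:
        handle_validation_error(e)  # raises HttpError(400, ...) with field_errors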
@ -1,34 +0,0 @@
import hashlib

from ninja.security import HttpBearer
from ninja.errors import HttpError

from epdb.models import APIToken


class BearerTokenAuth(HttpBearer):
    def authenticate(self, request, token):
        if token is None:
            return None

        hashed_token = hashlib.sha256(token.encode()).hexdigest()
        user = APIToken.authenticate(hashed_token, hashed=True)
        if not user:
            raise HttpError(401, "Invalid or expired token")

        request.user = user
        return user


class OptionalBearerTokenAuth:
    """Bearer auth that allows unauthenticated access.

    Validates the Bearer token if present (401 on invalid token),
    otherwise lets the request through for anonymous/session access.
    """

    def __init__(self):
        self._bearer = BearerTokenAuth()

    def __call__(self, request):
        return self._bearer(request) or request.user
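On the wire this is a plain Authorization header; a sketch assuming an existing `user` object and a local dev server:

import requests

from epdb.models import APIToken

# Server side: mint a token for an existing user (as in the tests above).
_, raw_token = APIToken.create_token(user, name="CLI Token", expires_days=30)

# Client side: send it as a Bearer header.
resp = requests.get(
    "http://localhost:8000/api/v1/compounds/",
    headers={"Authorization": f"Bearer {raw_token}"},
)
resp.raise_for_status()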
epapi/v1/dal.py
@ -1,137 +0,0 @@
from django.db.models import Model
from epdb.logic import PackageManager
from epdb.models import CompoundStructure, User, Package, Compound, Scenario
from uuid import UUID

from .errors import EPAPINotFoundError, EPAPIPermissionDeniedError


def get_compound_for_read(user, compound_uuid: UUID):
    """Get compound by UUID with read permission check."""
    try:
        compound = Compound.objects.get(uuid=compound_uuid)
        package = compound.package
    except Compound.DoesNotExist:
        raise EPAPINotFoundError(f"Compound with UUID {compound_uuid} not found")

    # FIXME: optimize package manager to exclusively work with UUIDs
    if not user or user.is_anonymous or not PackageManager.readable(user, package):
        raise EPAPIPermissionDeniedError("Insufficient permissions to access this compound.")

    return compound


def get_package_for_read(user, package_uuid: UUID):
    """Get package by UUID with read permission check."""
    # FIXME: update package manager with custom exceptions to avoid manual checks here
    try:
        package = Package.objects.get(uuid=package_uuid)
    except Package.DoesNotExist:
        raise EPAPINotFoundError(f"Package with UUID {package_uuid} not found")

    # FIXME: optimize package manager to exclusively work with UUIDs
    if not user or user.is_anonymous or not PackageManager.readable(user, package):
        raise EPAPIPermissionDeniedError("Insufficient permissions to access this package.")

    return package


def get_package_for_write(user, package_uuid: UUID):
    """Get package by UUID with write permission check."""
    # FIXME: update package manager with custom exceptions to avoid manual checks here
    try:
        package = Package.objects.get(uuid=package_uuid)
    except Package.DoesNotExist:
        raise EPAPINotFoundError(f"Package with UUID {package_uuid} not found")

    # FIXME: optimize package manager to exclusively work with UUIDs
    if not user or user.is_anonymous or not PackageManager.writable(user, package):
        raise EPAPIPermissionDeniedError("Insufficient permissions to access this package.")

    return package


def get_scenario_for_read(user, scenario_uuid: UUID):
    """Get scenario by UUID with read permission check."""
    try:
        scenario = Scenario.objects.select_related("package").get(uuid=scenario_uuid)
    except Scenario.DoesNotExist:
        raise EPAPINotFoundError(f"Scenario with UUID {scenario_uuid} not found")

    if not user or user.is_anonymous or not PackageManager.readable(user, scenario.package):
        raise EPAPIPermissionDeniedError("Insufficient permissions to access this scenario.")

    return scenario


def get_scenario_for_write(user, scenario_uuid: UUID):
    """Get scenario by UUID with write permission check."""
    try:
        scenario = Scenario.objects.select_related("package").get(uuid=scenario_uuid)
    except Scenario.DoesNotExist:
        raise EPAPINotFoundError(f"Scenario with UUID {scenario_uuid} not found")

    if not user or user.is_anonymous or not PackageManager.writable(user, scenario.package):
        raise EPAPIPermissionDeniedError("Insufficient permissions to modify this scenario.")

    return scenario


def get_user_packages_for_read(user: User | None):
    """Get all packages readable by the user."""
    if not user or user.is_anonymous:
        return PackageManager.get_reviewed_packages()
    return PackageManager.get_all_readable_packages(user, include_reviewed=True)


def get_user_entities_for_read(model_class: type[Model], user: User | None):
    """Build a queryset of entities from packages readable by the user
    (reviewed packages only for anonymous users)."""
    if not user or user.is_anonymous:
        return model_class.objects.filter(package__reviewed=True).select_related("package")

    qs = model_class.objects.filter(
        package__in=PackageManager.get_all_readable_packages(user, include_reviewed=True)
    ).select_related("package")
    return qs


def get_package_entities_for_read(
    model_class: type[Model], package_uuid: UUID, user: User | None = None
):
    """Build a queryset of entities for a specific package."""
    package = get_package_for_read(user, package_uuid)
    qs = model_class.objects.filter(package=package).select_related("package")
    return qs


def get_user_structure_for_read(user: User | None):
    """Build a queryset of structures accessible to the user (via compound->package)."""
    if not user or user.is_anonymous:
        return CompoundStructure.objects.filter(compound__package__reviewed=True).select_related(
            "compound__package"
        )

    qs = CompoundStructure.objects.filter(
        compound__package__in=PackageManager.get_all_readable_packages(user, include_reviewed=True)
    ).select_related("compound__package")
    return qs


def get_package_compound_structure_for_read(
    package_uuid: UUID, compound_uuid: UUID, user: User | None = None
):
    """Build a queryset of structures for a specific compound, checking package access first."""
    get_package_for_read(user, package_uuid)
    compound = get_compound_for_read(user, compound_uuid)

    qs = CompoundStructure.objects.filter(compound=compound).select_related("compound__package")
    return qs
@ -1,174 +0,0 @@
from ninja import Router, Body
from ninja.errors import HttpError
from uuid import UUID
from pydantic import ValidationError
from typing import Dict, Any
import logging

from envipy_additional_information import registry
from envipy_additional_information.groups import GroupEnum
from epapi.utils.schema_transformers import build_rjsf_output
from epapi.utils.validation_errors import handle_validation_error
from epdb.models import AdditionalInformation
from ..dal import get_scenario_for_read, get_scenario_for_write

logger = logging.getLogger(__name__)

router = Router(tags=["Additional Information"])


@router.get("/information/schema/")
def list_all_schemas(request):
    """Return all schemas in RJSF format with lowercase class names as keys."""
    result = {}
    for name, cls in registry.list_models().items():
        try:
            result[name] = build_rjsf_output(cls)
        except Exception as e:
            logger.warning(f"Failed to generate schema for {name}: {e}")
            continue
    return result


@router.get("/information/schema/{model_name}/")
def get_model_schema(request, model_name: str):
    """Return the RJSF schema for a specific model."""
    cls = registry.get_model(model_name.lower())
    if not cls:
        raise HttpError(404, f"Unknown model: {model_name}")
    return build_rjsf_output(cls)


@router.get("/scenario/{uuid:scenario_uuid}/information/")
def list_scenario_info(request, scenario_uuid: UUID):
    """List all additional information for a scenario."""
    scenario = get_scenario_for_read(request.user, scenario_uuid)

    result = []

    for ai in AdditionalInformation.objects.filter(scenario=scenario):
        result.append(
            {
                "type": ai.get().__class__.__name__,
                "uuid": getattr(ai, "uuid", None),
                "data": ai.data,
                "attach_object": ai.content_object.simple_json() if ai.content_object else None,
            }
        )
    return result


@router.post("/scenario/{uuid:scenario_uuid}/information/{model_name}/")
def add_scenario_info(
    request, scenario_uuid: UUID, model_name: str, payload: Dict[str, Any] = Body(...)
):
    """Add new additional information to a scenario."""
    cls = registry.get_model(model_name.lower())
    if not cls:
        raise HttpError(404, f"Unknown model: {model_name}")

    try:
        instance = cls(**payload)  # Pydantic validates
    except ValidationError as e:
        handle_validation_error(e)

    scenario = get_scenario_for_write(request.user, scenario_uuid)

    # The model method returns the UUID of the created entry
    created_uuid = scenario.add_additional_information(instance)

    return {"status": "created", "uuid": created_uuid}


@router.patch("/scenario/{uuid:scenario_uuid}/information/item/{uuid:ai_uuid}/")
def update_scenario_info(
    request, scenario_uuid: UUID, ai_uuid: UUID, payload: Dict[str, Any] = Body(...)
):
    """Update existing additional information for a scenario."""
    scenario = get_scenario_for_write(request.user, scenario_uuid)
    ai_uuid_str = str(ai_uuid)

    ai_qs = AdditionalInformation.objects.filter(uuid=ai_uuid_str, scenario=scenario)

    if not ai_qs.exists():
        raise HttpError(404, f"Additional information with UUID {ai_uuid} not found")

    ai = ai_qs.first()

    # Get the model class for validation
    cls = registry.get_model(ai.type.lower())
    if not cls:
        raise HttpError(500, f"Unknown model type in data: {ai.type}")

    # Validate the payload against the model
    try:
        instance = cls(**payload)
    except ValidationError as e:
        handle_validation_error(e)

    # Use the model method for the update
    try:
        scenario.update_additional_information(ai_uuid_str, instance)
    except ValueError as e:
        raise HttpError(404, str(e))

    return {"status": "updated", "uuid": ai_uuid_str}


@router.delete("/scenario/{uuid:scenario_uuid}/information/item/{uuid:ai_uuid}/")
def delete_scenario_info(request, scenario_uuid: UUID, ai_uuid: UUID):
    """Delete additional information from a scenario."""
    scenario = get_scenario_for_write(request.user, scenario_uuid)

    try:
        scenario.remove_additional_information(str(ai_uuid))
    except ValueError as e:
        raise HttpError(404, str(e))

    return {"status": "deleted"}


@router.get("/information/groups/")
def list_groups(request):
    """Return the list of available group names."""
    return {"groups": GroupEnum.values()}


@router.get("/information/groups/{group_name}/")
def get_group_models(request, group_name: str):
    """
    Return models for a specific group organized by subcategory.

    Args:
        group_name: One of "sludge", "soil", or "sediment" (string)

    Returns:
        Dictionary with subcategories (exp, spike, comp, misc, or the group name)
        as keys and lists of model info as values
    """
    # Convert the string to an enum (raises ValueError if invalid)
    try:
        group_enum = GroupEnum(group_name)
    except ValueError:
        valid = ", ".join(GroupEnum.values())
        raise HttpError(400, f"Invalid group '{group_name}'. Valid: {valid}")

    try:
        group_data = registry.collect_group(group_enum)
    except (ValueError, TypeError) as e:
        raise HttpError(400, str(e))

    result = {}
    for subcategory, models in group_data.items():
        result[subcategory] = [
            {
                "name": cls.__name__.lower(),
                "class": cls.__name__,
                # Fall back to the class name when no UI title is declared
                "title": getattr(getattr(cls, "UI", None), "title", cls.__name__),
            }
            for cls in models
        ]

    return result
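A rough end-to-end sketch of these routes; the "halflife" model name and its fields are illustrative (any registered model name works), and the scenario UUID is assumed to exist:

import requests

BASE = "http://localhost:8000/api/v1"
scenario_uuid = "..."  # an existing scenario the caller can write to

# Fetch the RJSF schema, then submit data that validates against the model.
schema = requests.get(f"{BASE}/information/schema/halflife/").json()
resp = requests.post(
    f"{BASE}/scenario/{scenario_uuid}/information/halflife/",
    json={"value": 23.5, "unit": "days"},
)
assert resp.json()["status"] == "created"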
@ -1,41 +0,0 @@
from django.conf import settings as s
from ninja import Router
from ninja_extra.pagination import paginate
from uuid import UUID

from epdb.models import Compound
from ..pagination import EnhancedPageNumberPagination
from ..schemas import CompoundOutSchema, ReviewStatusFilter
from ..dal import get_user_entities_for_read, get_package_entities_for_read

router = Router()


@router.get("/compounds/", response=EnhancedPageNumberPagination.Output[CompoundOutSchema])
@paginate(
    EnhancedPageNumberPagination,
    page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
    filter_schema=ReviewStatusFilter,
)
def list_all_compounds(request):
    """
    List all compounds from packages readable by the user.
    """
    return get_user_entities_for_read(Compound, request.user).order_by("name").all()


@router.get(
    "/package/{uuid:package_uuid}/compound/",
    response=EnhancedPageNumberPagination.Output[CompoundOutSchema],
)
@paginate(
    EnhancedPageNumberPagination,
    page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
    filter_schema=ReviewStatusFilter,
)
def list_package_compounds(request, package_uuid: UUID):
    """
    List all compounds for a specific package.
    """
    user = request.user
    return get_package_entities_for_read(Compound, package_uuid, user).order_by("name").all()
@ -1,41 +0,0 @@
from django.conf import settings as s
from ninja import Router
from ninja_extra.pagination import paginate
from uuid import UUID

from epdb.models import EPModel
from ..pagination import EnhancedPageNumberPagination
from ..schemas import ModelOutSchema, ReviewStatusFilter
from ..dal import get_user_entities_for_read, get_package_entities_for_read

router = Router()


@router.get("/models/", response=EnhancedPageNumberPagination.Output[ModelOutSchema])
@paginate(
    EnhancedPageNumberPagination,
    page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
    filter_schema=ReviewStatusFilter,
)
def list_all_models(request):
    """
    List all models from packages readable by the user.
    """
    return get_user_entities_for_read(EPModel, request.user).order_by("name").all()


@router.get(
    "/package/{uuid:package_uuid}/model/",
    response=EnhancedPageNumberPagination.Output[ModelOutSchema],
)
@paginate(
    EnhancedPageNumberPagination,
    page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
    filter_schema=ReviewStatusFilter,
)
def list_package_models(request, package_uuid: UUID):
    """
    List all models for a specific package.
    """
    user = request.user
    return get_package_entities_for_read(EPModel, package_uuid, user).order_by("name").all()
@ -1,32 +0,0 @@
from django.conf import settings as s
from ninja import Router
from ninja_extra.pagination import paginate
import logging

from ..auth import OptionalBearerTokenAuth
from ..dal import get_user_packages_for_read
from ..pagination import EnhancedPageNumberPagination
from ..schemas import PackageOutSchema, SelfReviewStatusFilter

router = Router()
logger = logging.getLogger(__name__)


@router.get(
    "/packages/",
    response=EnhancedPageNumberPagination.Output[PackageOutSchema],
    auth=OptionalBearerTokenAuth(),
)
@paginate(
    EnhancedPageNumberPagination,
    page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
    filter_schema=SelfReviewStatusFilter,
)
def list_all_packages(request):
    """
    List packages accessible to the user.
    """
    user = request.user
    qs = get_user_packages_for_read(user)
    return qs.order_by("name").all()
@ -1,42 +0,0 @@
from django.conf import settings as s
from ninja import Router
from ninja_extra.pagination import paginate
from uuid import UUID

from epdb.models import Pathway
from ..pagination import EnhancedPageNumberPagination
from ..schemas import PathwayOutSchema, ReviewStatusFilter
from ..dal import get_user_entities_for_read, get_package_entities_for_read

router = Router()


@router.get("/pathways/", response=EnhancedPageNumberPagination.Output[PathwayOutSchema])
@paginate(
    EnhancedPageNumberPagination,
    page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
    filter_schema=ReviewStatusFilter,
)
def list_all_pathways(request):
    """
    List all pathways from packages readable by the user.
    """
    user = request.user
    return get_user_entities_for_read(Pathway, user).order_by("name").all()


@router.get(
    "/package/{uuid:package_uuid}/pathway/",
    response=EnhancedPageNumberPagination.Output[PathwayOutSchema],
)
@paginate(
    EnhancedPageNumberPagination,
    page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
    filter_schema=ReviewStatusFilter,
)
def list_package_pathways(request, package_uuid: UUID):
    """
    List all pathways for a specific package.
    """
    user = request.user
    return get_package_entities_for_read(Pathway, package_uuid, user).order_by("name").all()
@ -1,42 +0,0 @@
from django.conf import settings as s
from ninja import Router
from ninja_extra.pagination import paginate
from uuid import UUID

from epdb.models import Reaction
from ..pagination import EnhancedPageNumberPagination
from ..schemas import ReactionOutSchema, ReviewStatusFilter
from ..dal import get_user_entities_for_read, get_package_entities_for_read

router = Router()


@router.get("/reactions/", response=EnhancedPageNumberPagination.Output[ReactionOutSchema])
@paginate(
    EnhancedPageNumberPagination,
    page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
    filter_schema=ReviewStatusFilter,
)
def list_all_reactions(request):
    """
    List all reactions from packages readable by the user.
    """
    user = request.user
    return get_user_entities_for_read(Reaction, user).order_by("name").all()


@router.get(
    "/package/{uuid:package_uuid}/reaction/",
    response=EnhancedPageNumberPagination.Output[ReactionOutSchema],
)
@paginate(
    EnhancedPageNumberPagination,
    page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
    filter_schema=ReviewStatusFilter,
)
def list_package_reactions(request, package_uuid: UUID):
    """
    List all reactions for a specific package.
    """
    user = request.user
    return get_package_entities_for_read(Reaction, package_uuid, user).order_by("name").all()
@ -1,42 +0,0 @@
from django.conf import settings as s
from ninja import Router
from ninja_extra.pagination import paginate
from uuid import UUID

from epdb.models import Rule
from ..pagination import EnhancedPageNumberPagination
from ..schemas import ReviewStatusFilter, RuleOutSchema
from ..dal import get_user_entities_for_read, get_package_entities_for_read

router = Router()


@router.get("/rules/", response=EnhancedPageNumberPagination.Output[RuleOutSchema])
@paginate(
    EnhancedPageNumberPagination,
    page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
    filter_schema=ReviewStatusFilter,
)
def list_all_rules(request):
    """
    List all rules from packages readable by the user.
    """
    user = request.user
    return get_user_entities_for_read(Rule, user).order_by("name").all()


@router.get(
    "/package/{uuid:package_uuid}/rule/",
    response=EnhancedPageNumberPagination.Output[RuleOutSchema],
)
@paginate(
    EnhancedPageNumberPagination,
    page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
    filter_schema=ReviewStatusFilter,
)
def list_package_rules(request, package_uuid: UUID):
    """
    List all rules for a specific package.
    """
    user = request.user
    return get_package_entities_for_read(Rule, package_uuid, user).order_by("name").all()
@ -1,129 +0,0 @@
from django.conf import settings as s
from django.db import IntegrityError, OperationalError, DatabaseError
from ninja import Router, Body
from ninja.errors import HttpError
from ninja_extra.pagination import paginate
from uuid import UUID
from pydantic import ValidationError
import logging
import json

from epdb.models import Scenario
from epdb.views import _anonymous_or_real
from ..pagination import EnhancedPageNumberPagination
from ..schemas import (
    ReviewStatusFilter,
    ScenarioOutSchema,
    ScenarioCreateSchema,
)
from ..dal import get_user_entities_for_read, get_package_entities_for_read, get_package_for_write
from envipy_additional_information import registry

logger = logging.getLogger(__name__)

router = Router()


@router.get("/scenarios/", response=EnhancedPageNumberPagination.Output[ScenarioOutSchema])
@paginate(
    EnhancedPageNumberPagination,
    page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
    filter_schema=ReviewStatusFilter,
)
def list_all_scenarios(request):
    user = request.user
    items = get_user_entities_for_read(Scenario, user)
    return items.order_by("name").all()


@router.get(
    "/package/{uuid:package_uuid}/scenario/",
    response=EnhancedPageNumberPagination.Output[ScenarioOutSchema],
)
@paginate(
    EnhancedPageNumberPagination,
    page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
    filter_schema=ReviewStatusFilter,
)
def list_package_scenarios(request, package_uuid: UUID):
    user = request.user
    items = get_package_entities_for_read(Scenario, package_uuid, user)
    return items.order_by("name").all()


@router.post("/package/{uuid:package_uuid}/scenario/", response=ScenarioOutSchema)
def create_scenario(request, package_uuid: UUID, payload: ScenarioCreateSchema = Body(...)):
    """Create a new scenario with optional additional information."""
    user = _anonymous_or_real(request)

    # EPAPINotFoundError/EPAPIPermissionDeniedError raised by the DAL propagate
    # as 404/403 on their own; the ValueError guard covers legacy error paths.
    try:
        current_package = get_package_for_write(user, package_uuid)
    except ValueError as e:
        error_msg = str(e)
        if "does not exist" in error_msg:
            raise HttpError(404, f"Package not found: {package_uuid}")
        elif "Insufficient permissions" in error_msg:
            raise HttpError(403, "You do not have permission to access this package")
        else:
            logger.error(f"Unexpected ValueError from get_package_for_write: {error_msg}")
            raise HttpError(400, "Invalid package request")

    # Build additional information models from the payload
    additional_information_models = []
    validation_errors = []

    for ai_item in payload.additional_information:
        # Get the model class from the registry
        model_cls = registry.get_model(ai_item.type.lower())
        if not model_cls:
            validation_errors.append(f"Unknown additional information type: {ai_item.type}")
            continue

        try:
            # Validate and create the model instance
            instance = model_cls(**ai_item.data)
            additional_information_models.append(instance)
        except ValidationError as e:
            # Collect validation errors to return to the user
            error_messages = [err.get("msg", "Validation error") for err in e.errors()]
            validation_errors.append(f"{ai_item.type}: {', '.join(error_messages)}")
        except (TypeError, AttributeError, KeyError) as e:
            logger.warning(f"Failed to instantiate {ai_item.type} model: {str(e)}")
            validation_errors.append(f"{ai_item.type}: Invalid data structure - {str(e)}")
        except Exception as e:
            logger.error(f"Unexpected error instantiating {ai_item.type}: {str(e)}")
            validation_errors.append(f"{ai_item.type}: Failed to process - please check your data")

    # If there are validation errors, return them
    if validation_errors:
        raise HttpError(
            400,
            json.dumps(
                {
                    "error": "Validation errors in additional information",
                    "details": validation_errors,
                }
            ),
        )

    # Create the scenario using the existing Scenario.create method
    try:
        new_scenario = Scenario.create(
            package=current_package,
            name=payload.name,
            description=payload.description,
            scenario_date=payload.scenario_date,
            scenario_type=payload.scenario_type,
            additional_information=additional_information_models,
        )
    except IntegrityError as e:
        logger.error(f"Database integrity error creating scenario: {str(e)}")
        raise HttpError(400, "Scenario creation failed - data constraint violation")
    except OperationalError as e:
        logger.error(f"Database operational error creating scenario: {str(e)}")
        raise HttpError(503, "Database temporarily unavailable - please try again")
    except (DatabaseError, AttributeError) as e:
        logger.error(f"Error creating scenario: {str(e)}")
        raise HttpError(500, "Failed to create scenario due to database error")

    return new_scenario
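A minimal happy-path call against this route, in the style of the tests earlier in this diff (assumes a logged-in Django test client and a writable `package`):

import json

from django.test import Client

client = Client()
resp = client.post(
    f"/api/v1/package/{package.uuid}/scenario/",
    data=json.dumps({"name": "Field study 2024", "scenario_type": "biodegradation"}),
    content_type="application/json",
)
assert resp.status_code == 200
assert resp.json()["name"] == "Field study 2024"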
@ -1,23 +0,0 @@
from django.conf import settings as s
from ninja import Router
from ninja_extra.pagination import paginate

from epdb.logic import SettingManager

from ..pagination import EnhancedPageNumberPagination
from ..schemas import SettingOutSchema

router = Router()


@router.get("/settings/", response=EnhancedPageNumberPagination.Output[SettingOutSchema])
@paginate(
    EnhancedPageNumberPagination,
    page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
)
def list_all_settings(request):
    """
    List all settings visible to the user.
    """
    user = request.user
    return SettingManager.get_all_settings(user)
@ -1,50 +0,0 @@
from django.conf import settings as s
from ninja import Router
from ninja_extra.pagination import paginate
from uuid import UUID

from ..pagination import EnhancedPageNumberPagination
from ..schemas import CompoundStructureOutSchema, StructureReviewStatusFilter
from ..dal import (
    get_user_structure_for_read,
    get_package_compound_structure_for_read,
)

router = Router()


@router.get(
    "/structures/", response=EnhancedPageNumberPagination.Output[CompoundStructureOutSchema]
)
@paginate(
    EnhancedPageNumberPagination,
    page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
    filter_schema=StructureReviewStatusFilter,
)
def list_all_structures(request):
    """
    List all structures from packages readable by the user.
    """
    user = request.user
    return get_user_structure_for_read(user).order_by("name").all()


@router.get(
    "/package/{uuid:package_uuid}/compound/{uuid:compound_uuid}/structure/",
    response=EnhancedPageNumberPagination.Output[CompoundStructureOutSchema],
)
@paginate(
    EnhancedPageNumberPagination,
    page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
    filter_schema=StructureReviewStatusFilter,
)
def list_package_structures(request, package_uuid: UUID, compound_uuid: UUID):
    """
    List all structures for a specific package and compound.
    """
    user = request.user
    return (
        get_package_compound_structure_for_read(package_uuid, compound_uuid, user)
        .order_by("name")
        .all()
    )
@ -1,28 +0,0 @@
from ninja.errors import HttpError


class EPAPIError(HttpError):
    status_code: int = 500

    def __init__(self, message: str) -> None:
        super().__init__(status_code=self.status_code, message=message)

    @classmethod
    def from_exception(cls, exc: Exception):
        return cls(message=str(exc))


class EPAPIUnauthorizedError(EPAPIError):
    status_code = 401


class EPAPIPermissionDeniedError(EPAPIError):
    status_code = 403


class EPAPINotFoundError(EPAPIError):
    status_code = 404


class EPAPIValidationError(EPAPIError):
    status_code = 422
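Since each subclass pins its status code, call sites just raise the matching exception, as dal.py does; a sketch with a hypothetical helper:

from epdb.models import Rule


def get_rule_or_404(rule_uuid):
    # hypothetical helper in the style of the dal.py functions above
    try:
        return Rule.objects.get(uuid=rule_uuid)
    except Rule.DoesNotExist:
        raise EPAPINotFoundError(f"Rule with UUID {rule_uuid} not found")  # -> HTTP 404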
@ -1,60 +0,0 @@
import math
from typing import Any, Generic, List, TypeVar

from django.db.models import QuerySet
from ninja import Schema
from ninja.pagination import PageNumberPagination

T = TypeVar("T")


class EnhancedPageNumberPagination(PageNumberPagination):
    class Output(Schema, Generic[T]):
        items: List[T]
        page: int
        page_size: int
        total_items: int
        total_pages: int

    def paginate_queryset(
        self,
        queryset: QuerySet,
        pagination: PageNumberPagination.Input,
        **params: Any,
    ) -> Any:
        page_size = self._get_page_size(pagination.page_size)
        offset = (pagination.page - 1) * page_size
        total_items = self._items_count(queryset)
        total_pages = math.ceil(total_items / page_size) if page_size > 0 else 0

        return {
            "items": queryset[offset : offset + page_size],
            "page": pagination.page,
            "page_size": page_size,
            "total_items": total_items,
            "total_pages": total_pages,
        }

    async def apaginate_queryset(
        self,
        queryset: QuerySet,
        pagination: PageNumberPagination.Input,
        **params: Any,
    ) -> Any:
        page_size = self._get_page_size(pagination.page_size)
        offset = (pagination.page - 1) * page_size
        total_items = await self._aitems_count(queryset)
        total_pages = math.ceil(total_items / page_size) if page_size > 0 else 0

        if isinstance(queryset, QuerySet):
            items = [obj async for obj in queryset[offset : offset + page_size]]
        else:
            items = queryset[offset : offset + page_size]

        return {
            "items": items,
            "page": pagination.page,
            "page_size": page_size,
            "total_items": total_items,
            "total_pages": total_pages,
        }
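An endpoint paginated with this class therefore returns the page metadata next to the items; roughly, with illustrative numbers:

# Illustrative response for ?page=2&page_size=50 over 120 matching rows:
# {
#     "items": [...50 serialized objects...],
#     "page": 2,
#     "page_size": 50,
#     "total_items": 120,
#     "total_pages": 3    # math.ceil(120 / 50)
# }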
@ -1,36 +0,0 @@
from ninja import Router
from ninja.security import SessionAuth

from .auth import BearerTokenAuth
from .endpoints import (
    packages,
    scenarios,
    compounds,
    rules,
    reactions,
    pathways,
    models,
    structure,
    additional_information,
    settings,
)

# Main router with authentication
router = Router(
    auth=[
        SessionAuth(),
        BearerTokenAuth(),
    ]
)

# Include all endpoint routers
router.add_router("", packages.router)
router.add_router("", scenarios.router)
router.add_router("", compounds.router)
router.add_router("", rules.router)
router.add_router("", reactions.router)
router.add_router("", pathways.router)
router.add_router("", models.router)
router.add_router("", structure.router)
router.add_router("", additional_information.router)
router.add_router("", settings.router)
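A sketch of how this composed router is typically mounted on the project's NinjaAPI instance; the module paths below are assumptions based on the epapi.v1 prefix seen elsewhere in this diff:

# Hypothetical wiring sketch -- module layout is assumed.
from ninja import NinjaAPI
from epapi.v1.router import router as v1_router

api = NinjaAPI()
api.add_router("/v1", v1_router)  # session/bearer auth now guards every sub-router
# urls.py would then include path("api/", api.urls)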
@ -1,128 +0,0 @@
from ninja import FilterSchema, FilterLookup, Schema
from typing import Annotated, Optional, List, Dict, Any
from uuid import UUID


# Filter schema for query parameters
class ReviewStatusFilter(FilterSchema):
    """Filter schema for review_status query parameter."""

    review_status: Annotated[Optional[bool], FilterLookup("package__reviewed")] = None


class SelfReviewStatusFilter(FilterSchema):
    """Filter schema for review_status query parameter on self-reviewed entities."""

    review_status: Annotated[Optional[bool], FilterLookup("reviewed")] = None


class StructureReviewStatusFilter(FilterSchema):
    """Filter schema for review_status on structures (via compound->package)."""

    review_status: Annotated[Optional[bool], FilterLookup("compound__package__reviewed")] = None
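Each filter translates an optional review_status query parameter into the ORM lookup named in FilterLookup. For structures, the equivalent hand-written query would be roughly:

# Equivalent manual filtering for ?review_status=true (illustrative):
qs = CompoundStructure.objects.filter(compound__package__reviewed=True)
# With the FilterSchema, django-ninja builds this via filters.filter(qs),
# which the custom paginator presumably applies when filter_schema is passed.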
# Base schema for all package-scoped entities
class PackageEntityOutSchema(Schema):
    """Base schema for entities belonging to a package."""

    uuid: UUID
    url: str = ""
    name: str
    description: str
    review_status: str = ""
    package: str = ""

    @staticmethod
    def resolve_url(obj):
        return obj.url

    @staticmethod
    def resolve_package(obj):
        return obj.package.url

    @staticmethod
    def resolve_review_status(obj):
        return "reviewed" if obj.package.reviewed else "unreviewed"


# All package-scoped entities inherit from base
class ScenarioOutSchema(PackageEntityOutSchema):
    pass


class AdditionalInformationItemSchema(Schema):
    """Schema for additional information item in scenario creation."""

    type: str
    data: Dict[str, Any]


class ScenarioCreateSchema(Schema):
    """Schema for creating a new scenario."""

    name: str
    description: str = ""
    scenario_date: str = "No date"
    scenario_type: str = "Not specified"
    additional_information: List[AdditionalInformationItemSchema] = []


class CompoundOutSchema(PackageEntityOutSchema):
    pass


class RuleOutSchema(PackageEntityOutSchema):
    pass


class ReactionOutSchema(PackageEntityOutSchema):
    pass


class PathwayOutSchema(PackageEntityOutSchema):
    pass


class ModelOutSchema(PackageEntityOutSchema):
    pass


class CompoundStructureOutSchema(PackageEntityOutSchema):
    compound: str = ""

    @staticmethod
    def resolve_compound(obj):
        return obj.compound.url

    @staticmethod
    def resolve_package(obj):
        return obj.compound.package.url

    @staticmethod
    def resolve_review_status(obj):
        return "reviewed" if obj.compound.package.reviewed else "unreviewed"


# Package is special (no package FK)
class PackageOutSchema(Schema):
    uuid: UUID
    url: str = ""
    name: str
    description: str
    review_status: str = ""

    @staticmethod
    def resolve_url(obj):
        return obj.url

    @staticmethod
    def resolve_review_status(obj):
        return "reviewed" if obj.reviewed else "unreviewed"


class SettingOutSchema(Schema):
    uuid: UUID
    url: str = ""
    name: str
    description: str
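With the resolvers above, a reviewed package-scoped entity serializes along these lines (all values illustrative):

# Illustrative PackageEntityOutSchema payload:
# {
#     "uuid": "0e45e0a8-...",
#     "url": "https://envipath.org/package/0e45.../compound/...",
#     "name": "Atrazine",
#     "description": "...",
#     "review_status": "reviewed",                       # resolve_review_status
#     "package": "https://envipath.org/package/0e45..."  # resolve_package
# }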
@ -1,48 +1,31 @@
from django.conf import settings as s
from django.contrib import admin

from .models import (
    AdditionalInformation,
    Compound,
    CompoundStructure,
    Edge,
    EnviFormer,
    ExternalDatabase,
    ExternalIdentifier,
    Group,
    GroupPackagePermission,
    JobLog,
    License,
    MLRelativeReasoning,
    Node,
    ParallelRule,
    Pathway,
    PropertyPluginModel,
    Reaction,
    Scenario,
    Setting,
    SimpleAmbitRule,
    User,
    UserPackagePermission,
    Group,
    GroupPackagePermission,
    Package,
    MLRelativeReasoning,
    EnviFormer,
    Compound,
    CompoundStructure,
    SimpleAmbitRule,
    ParallelRule,
    Reaction,
    Pathway,
    Node,
    Edge,
    Scenario,
    Setting,
    ExternalDatabase,
    ExternalIdentifier,
    JobLog,
)

Package = s.GET_PACKAGE_MODEL()


class AdditionalInformationAdmin(admin.ModelAdmin):
    pass


class UserAdmin(admin.ModelAdmin):
    list_display = [
        "username",
        "email",
        "is_active",
        "is_staff",
        "is_superuser",
        "last_login",
        "date_joined",
    ]
    list_display = ["username", "email", "is_active"]


class UserPackagePermissionAdmin(admin.ModelAdmin):
@ -62,7 +45,7 @@ class JobLogAdmin(admin.ModelAdmin):


class EPAdmin(admin.ModelAdmin):
    search_fields = ["name", "description", "url", "uuid"]
    search_fields = ["name", "description"]
    list_display = ["name", "url", "created"]
    ordering = ["-created"]

@ -79,14 +62,6 @@ class EnviFormerAdmin(EPAdmin):
    pass


class PropertyPluginModelAdmin(admin.ModelAdmin):
    pass


class LicenseAdmin(admin.ModelAdmin):
    list_display = ["cc_string", "link", "image_link"]


class CompoundAdmin(EPAdmin):
    pass

@ -135,7 +110,6 @@ class ExternalIdentifierAdmin(admin.ModelAdmin):
    pass


admin.site.register(AdditionalInformation, AdditionalInformationAdmin)
admin.site.register(User, UserAdmin)
admin.site.register(UserPackagePermission, UserPackagePermissionAdmin)
admin.site.register(Group, GroupAdmin)
@ -144,8 +118,6 @@ admin.site.register(JobLog, JobLogAdmin)
admin.site.register(Package, PackageAdmin)
admin.site.register(MLRelativeReasoning, MLRelativeReasoningAdmin)
admin.site.register(EnviFormer, EnviFormerAdmin)
admin.site.register(PropertyPluginModel, PropertyPluginModelAdmin)
admin.site.register(License, LicenseAdmin)
admin.site.register(Compound, CompoundAdmin)
admin.site.register(CompoundStructure, CompoundStructureAdmin)
admin.site.register(SimpleAmbitRule, SimpleAmbitRuleAdmin)
epdb/api.py
@ -2,12 +2,20 @@ from typing import List

from django.contrib.auth import get_user_model
from ninja import Router, Schema, Field
from ninja.errors import HttpError
from ninja.pagination import paginate

from epapi.v1.auth import BearerTokenAuth
from ninja.security import HttpBearer

from .logic import PackageManager
from .models import User, Compound
from .models import User, Compound, APIToken


class BearerTokenAuth(HttpBearer):
    def authenticate(self, request, token):
        for token_obj in APIToken.objects.select_related("user").all():
            if token_obj.check_token(token) and token_obj.is_valid():
                return token_obj.user
        raise HttpError(401, "Invalid or expired token")


def _anonymous_or_real(request):
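Once a token is issued, a client sends it as a bearer header; authenticate() returns the owning user, which ninja exposes as request.auth. A minimal client sketch, with placeholder host and token:

# Hypothetical client call -- URL and token are placeholders.
import requests

resp = requests.get(
    "http://localhost:8000/api/v1/compounds/",
    headers={"Authorization": "Bearer <raw-token>"},
)
resp.raise_for_status()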
epdb/apps.py
@ -1,9 +1,4 @@
import logging

from django.apps import AppConfig
from django.conf import settings

logger = logging.getLogger(__name__)


class EPDBConfig(AppConfig):
@ -12,16 +7,3 @@ class EPDBConfig(AppConfig):

    def ready(self):
        import epdb.signals  # noqa: F401

        model_name = getattr(settings, "EPDB_PACKAGE_MODEL", "epdb.Package")
        logger.info(f"Using Package model: {model_name}")

        from .autodiscovery import autodiscover

        autodiscover()

        if settings.PLUGINS_ENABLED:
            from bridge.contracts import Property
            from utilities.plugin import discover_plugins

            settings.PROPERTY_PLUGINS.update(**discover_plugins(_cls=Property))
@ -1,5 +0,0 @@
from django.utils.module_loading import autodiscover_modules


def autodiscover():
    autodiscover_modules("epdb_hooks")
@ -1,32 +0,0 @@
"""
Context processors for enviPy application.

Context processors automatically make variables available to all templates.
"""

from .logic import PackageManager
from django.conf import settings as s


def package_context(request):
    """
    Provides package data for the search modal which is included globally
    in framework_modern.html.

    Returns:
        dict: Context dictionary with reviewed and unreviewed packages
    """
    current_user = request.user

    reviewed_package_qs = PackageManager.get_reviewed_packages()

    unreviewed_package_qs = s.GET_PACKAGE_MODEL().objects.none()

    # Only get user-specific packages if user is authenticated
    if current_user.is_authenticated:
        unreviewed_package_qs = PackageManager.get_all_readable_packages(current_user)

    return {
        "reviewed_packages": reviewed_package_qs,
        "unreviewed_packages": unreviewed_package_qs,
    }
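For package_context to run on every request it has to be registered with the template engine; a sketch of the settings entry, assuming the file lives at epdb/context_processors.py (the path is not shown in this diff):

# Hypothetical settings.py excerpt -- the dotted path is an assumption.
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.request",
                "epdb.context_processors.package_context",  # assumed module path
            ],
        },
    },
]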
epdb/legacy_api.py
File diff suppressed because it is too large
epdb/logic.py
@ -1,42 +1,38 @@
import logging
import re
from typing import Any, Dict, List, Optional, Set, Union, Tuple
import logging
import json
from typing import Union, List, Optional, Set, Dict, Any
from uuid import UUID

import nh3
from django.conf import settings as s
from django.contrib.auth import get_user_model
from django.db import transaction
from django.conf import settings as s
from pydantic import ValidationError

from epdb.models import (
    AdditionalInformation,
    Compound,
    CompoundStructure,
    Edge,
    EnzymeLink,
    EPModel,
    ExpansionSchemeChoice,
    Group,
    GroupPackagePermission,
    Node,
    Pathway,
    Permission,
    PropertyPluginModel,
    Reaction,
    Rule,
    Setting,
    User,
    Package,
    UserPackagePermission,
    GroupPackagePermission,
    Permission,
    Group,
    Setting,
    EPModel,
    UserSettingPermission,
    Rule,
    Pathway,
    Node,
    Edge,
    Compound,
    Reaction,
    CompoundStructure,
    EnzymeLink,
)
from utilities.chem import FormatConverter
from utilities.misc import PackageExporter, PackageImporter
from utilities.misc import PackageImporter, PackageExporter

logger = logging.getLogger(__name__)

Package = s.GET_PACKAGE_MODEL()


class EPDBURLParser:
    UUID_PATTERN = r"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}"
@ -189,12 +185,8 @@ class UserManager(object):
    def create_user(
        username, email, password, set_setting=True, add_to_group=True, *args, **kwargs
    ):
        # Clean for potential XSS
        clean_username = nh3.clean(username).strip()
        clean_email = nh3.clean(email).strip()
        if clean_username != username or clean_email != email:
            # This will be caught by the try in view.py/register
            raise ValueError("Invalid username or password")
        # avoid circular import :S
        from .tasks import send_registration_mail

        extra_fields = {"is_active": not s.ADMIN_APPROVAL_REQUIRED}

@ -213,6 +205,10 @@ class UserManager(object):
        u.default_package = p
        u.save()

        if not u.is_active:
            # send email for verification
            send_registration_mail.delay(u.pk)

        if set_setting:
            u.default_setting = Setting.objects.get(global_default=True)
            u.save()
@ -266,9 +262,8 @@ class GroupManager(object):
    @staticmethod
    def create_group(current_user, name, description):
        g = Group()
        # Clean for potential XSS
        g.name = nh3.clean(name, tags=s.ALLOWED_HTML_TAGS).strip()
        g.description = nh3.clean(description, tags=s.ALLOWED_HTML_TAGS).strip()
        g.name = name
        g.description = description
        g.owner = current_user
        g.save()

@ -439,7 +434,6 @@ class PackageManager(object):
        if PackageManager.readable(user, p):
            return p
        else:
            # FIXME: use custom exception to be translatable to 403 in API
            raise ValueError(
                "Insufficient permissions to access Package with ID {}".format(package_id)
            )
@ -524,13 +518,8 @@ class PackageManager(object):
    @transaction.atomic
    def create_package(current_user, name: str, description: str = None):
        p = Package()

        # Clean for potential XSS
        p.name = nh3.clean(name, tags=s.ALLOWED_HTML_TAGS).strip()

        if description is not None and description.strip() != "":
            p.description = nh3.clean(description.strip(), tags=s.ALLOWED_HTML_TAGS).strip()

        p.name = name
        p.description = description
        p.save()

        up = UserPackagePermission()
@ -576,39 +565,30 @@ class PackageManager(object):
        else:
            _ = perm_cls.objects.update_or_create(defaults={"permission": new_perm}, **data)

    @staticmethod
    def grant_read(caller: User, package: Package, grantee: Union[User, Group]):
        PackageManager.update_permissions(caller, package, grantee, Permission.READ[0])

    @staticmethod
    def grant_write(caller: User, package: Package, grantee: Union[User, Group]):
        PackageManager.update_permissions(caller, package, grantee, Permission.WRITE[0])

    @staticmethod
    @transaction.atomic
    def import_legacy_package(
        data: dict, owner: User, keep_ids=False, add_import_timestamp=True, trust_reviewed=False
    ):
        from collections import defaultdict
        from datetime import datetime
        from uuid import UUID, uuid4

        from envipy_additional_information import AdditionalInformationConverter

        from datetime import datetime
        from collections import defaultdict
        from .models import (
            Package,
            Compound,
            CompoundStructure,
            Edge,
            Node,
            SimpleRule,
            SimpleAmbitRule,
            ParallelRule,
            Pathway,
            Reaction,
            Scenario,
            SequentialRule,
            SequentialRuleOrdering,
            SimpleAmbitRule,
            SimpleRule,
            Reaction,
            Pathway,
            Node,
            Edge,
            Scenario,
        )
        from envipy_additional_information import AdditionalInformationConverter

        pack = Package()
        pack.uuid = UUID(data["id"].split("/")[-1]) if keep_ids else uuid4()
@ -634,30 +614,15 @@ class PackageManager(object):

        # Stores old_id to new_id
        mapping = {}
        # Stores new_scen_id to old_parent_scen_id
        parent_mapping = {}
        # Mapping old scen_id to old_obj_id
        scen_mapping = defaultdict(list)
        # Enzymelink Mapping rule_id to enzymelink objects
        enzyme_mapping = defaultdict(list)

        # old_parent_id to child
        postponed_scens = defaultdict(list)

        # Store Scenarios
        for scenario in data["scenarios"]:
            skip_scen = False
            # Check if parent exists and park this Scenario to convert it later into an
            # AdditionalInformation object
            for ex in scenario.get("additionalInformationCollection", {}).get(
                "additionalInformation", []
            ):
                if ex["name"] == "referringscenario":
                    postponed_scens[ex["data"]].append(scenario)
                    skip_scen = True
                    break

            if skip_scen:
                continue

            scen = Scenario()
            scen.package = pack
            scen.uuid = UUID(scenario["id"].split("/")[-1]) if keep_ids else uuid4()
@ -670,12 +635,19 @@ class PackageManager(object):

            mapping[scenario["id"]] = scen.uuid

            new_add_inf = defaultdict(list)
            # TODO Store AI...
            for ex in scenario.get("additionalInformationCollection", {}).get(
                "additionalInformation", []
            ):
                name = ex["name"]
                addinf_data = ex["data"]

                # park the parent scen id for now and link it later
                if name == "referringscenario":
                    parent_mapping[scen.uuid] = addinf_data
                    continue

                # Broken eP Data
                if name == "initialmasssediment" and addinf_data == "missing data":
                    continue
@ -683,11 +655,17 @@ class PackageManager(object):
                    continue

                try:
                    ai = AdditionalInformationConverter.convert(name, addinf_data)
                    AdditionalInformation.create(pack, ai, scenario=scen)
                except (ValidationError, ValueError):
                    res = AdditionalInformationConverter.convert(name, addinf_data)
                    res_cls_name = res.__class__.__name__
                    ai_data = json.loads(res.model_dump_json())
                    ai_data["uuid"] = f"{uuid4()}"
                    new_add_inf[res_cls_name].append(ai_data)
                except ValidationError:
                    logger.error(f"Failed to convert {name} with {addinf_data}")

            scen.additional_information = new_add_inf
            scen.save()

        print("Scenarios imported...")

        # Store compounds and their structures
@ -927,46 +905,14 @@ class PackageManager(object):

        print("Pathways imported...")

        for parent, children in postponed_scens.items():
            for child in children:
                for ex in child.get("additionalInformationCollection", {}).get(
                    "additionalInformation", []
                ):
                    child_id = child["id"]
                    name = ex["name"]
                    addinf_data = ex["data"]

                    if name == "referringscenario":
                        continue
                    # Broken eP Data
                    if name == "initialmasssediment" and addinf_data == "missing data":
                        continue
                    if name == "columnheight" and addinf_data == "(2)-(2.5);(6)-(8)":
                        continue

                    ai = AdditionalInformationConverter.convert(name, addinf_data)

                    if child_id not in scen_mapping:
                        logger.info(
                            f"{child_id} not found in scen_mapping. Seems like it's not attached to any object"
                        )
                        print(
                            f"{child_id} not found in scen_mapping. Seems like it's not attached to any object"
                        )

                    scen = Scenario.objects.get(uuid=mapping[parent])
                    mapping[child_id] = scen.uuid
                    for obj in scen_mapping[child_id]:
                        _ = AdditionalInformation.create(pack, ai, scen, content_object=obj)
        # Linking Phase
        for child, parent in parent_mapping.items():
            child_obj = Scenario.objects.get(uuid=child)
            parent_obj = Scenario.objects.get(uuid=mapping[parent])
            child_obj.parent = parent_obj
            child_obj.save()

        for scen_id, objects in scen_mapping.items():
            new_id = mapping.get(scen_id)

            if new_id is None:
                logger.warning(f"Could not find mapping for {scen_id}")
                print(f"Could not find mapping for {scen_id}")
                continue

            scen = Scenario.objects.get(uuid=mapping[scen_id])
            for o in objects:
                o.scenarios.add(scen)
@ -999,7 +945,6 @@ class PackageManager(object):
            matches = re.findall(r">(R[0-9]+)<", evidence["evidence"])
            if not matches or len(matches) != 1:
                logger.warning(f"Could not find reaction id in {evidence['evidence']}")
                print(f"Could not find reaction id in {evidence['evidence']}")
                continue

            e.add_kegg_reaction_id(matches[0])
@ -1019,6 +964,7 @@ class PackageManager(object):
        print("Fixing Node depths...")
        total_pws = Pathway.objects.filter(package=pack).count()
        for p, pw in enumerate(Pathway.objects.filter(package=pack)):
            print(pw.url)
            in_count = defaultdict(lambda: 0)
            out_count = defaultdict(lambda: 0)

@ -1054,6 +1000,7 @@ class PackageManager(object):
            if str(prod.uuid) not in seen:
                old_depth = prod.depth
                if old_depth != i + 1:
                    print(f"updating depth from {old_depth} to {i + 1}")
                    prod.depth = i + 1
                    prod.save()

@ -1064,7 +1011,7 @@ class PackageManager(object):
            if new_level:
                levels.append(new_level)

            print(f"{p + 1}/{total_pws} fixed.", end="\r")
            print(f"{p + 1}/{total_pws} fixed.")

        return pack
@ -1143,43 +1090,32 @@ class SettingManager(object):
        description: str = None,
        max_nodes: int = None,
        max_depth: int = None,
        rule_packages: List[Package] | None = None,
        rule_packages: List[Package] = None,
        model: EPModel = None,
        model_threshold: float = None,
        expansion_scheme: ExpansionSchemeChoice = ExpansionSchemeChoice.BFS,
        property_models: List["PropertyPluginModel"] | None = None,
    ):
        new_s = Setting()
        s = Setting()
        s.name = name
        s.description = description
        s.max_nodes = max_nodes
        s.max_depth = max_depth
        s.model = model
        s.model_threshold = model_threshold

        # Clean for potential XSS
        new_s.name = nh3.clean(name, tags=s.ALLOWED_HTML_TAGS).strip()
        new_s.description = nh3.clean(description, tags=s.ALLOWED_HTML_TAGS).strip()

        new_s.max_nodes = max_nodes
        new_s.max_depth = max_depth
        new_s.model = model
        new_s.model_threshold = model_threshold
        new_s.expansion_scheme = expansion_scheme

        new_s.save()
        s.save()

        if rule_packages is not None:
            for r in rule_packages:
                new_s.rule_packages.add(r)
            new_s.save()

        if property_models is not None:
            for pm in property_models:
                new_s.property_models.add(pm)
            new_s.save()
                s.rule_packages.add(r)
            s.save()

        usp = UserSettingPermission()
        usp.user = user
        usp.setting = new_s
        usp.setting = s
        usp.permission = Permission.ALL[0]
        usp.save()

        return new_s
        return s

    @staticmethod
    def get_default_setting(user: User):
@ -1438,9 +1374,6 @@ class SEdge(object):
        self.rule = rule
        self.probability = probability

    def product_smiles(self):
        return [p.smiles for p in self.products]

    def __hash__(self):
        full_hash = 0

@ -1526,7 +1459,6 @@ class SPathway(object):
        self.smiles_to_node: Dict[str, SNode] = dict(**{n.smiles: n for n in self.root_nodes})
        self.edges: Set["SEdge"] = set()
        self.done = False
        self.empty_due_to_threshold = False

    @staticmethod
    def from_pathway(pw: "Pathway", persist: bool = True):
@ -1591,207 +1523,6 @@ class SPathway(object):

        return sorted(res, key=lambda x: hash(x))

    def _expand(self, substrates: List[SNode]) -> Tuple[List[SNode], List[SEdge]]:
        """
        Expands the given substrates by generating new nodes and edges based on prediction settings.

        This method processes a list of substrates and expands them into new nodes and edges using defined
        rules and settings. It evaluates each substrate to determine its applicability domain, persists
        domain assessments, and generates candidates for further processing. Newly created nodes and edges
        are returned, and any applicable information is stored or updated internally during the process.

        Parameters:
            substrates (List[SNode]): A list of substrate nodes to be expanded.

        Returns:
            Tuple[List[SNode], List[SEdge]]:
                A tuple containing:
                - A list of new nodes generated during the expansion.
                - A list of new edges representing connections between nodes based on candidate reactions.

        Raises:
            ValueError: If a node does not have an ID when it should have been saved already.
        """
        new_nodes: List[SNode] = []
        new_edges: List[SEdge] = []

        for sub in substrates:
            # For App Domain we have to ensure that each Node is evaluated
            if sub.app_domain_assessment is None:
                if self.prediction_setting.model:
                    if self.prediction_setting.model.app_domain:
                        app_domain_assessment = self.prediction_setting.model.app_domain.assess(
                            sub.smiles
                        )

                        if self.persist is not None:
                            n = self.snode_persist_lookup[sub]

                            if n.id is None:
                                raise ValueError(f"Node {n} has no ID... aborting!")

                            node_data = n.simple_json()
                            node_data["image"] = f"{n.url}?image=svg"
                            app_domain_assessment["assessment"]["node"] = node_data

                            n.kv["app_domain_assessment"] = app_domain_assessment
                            n.save()

                        sub.app_domain_assessment = app_domain_assessment

            expansion_result = self.prediction_setting.expand(self, sub)

            # We don't have any substrate, but technically we have at least one rule that triggered.
            # If our substrate is a root node a.k.a. depth == 0 store that info in SPathway
            if (
                len(expansion_result["transformations"]) == 0
                and expansion_result["rule_triggered"]
                and sub.depth == 0
            ):
                self.empty_due_to_threshold = True

                # Emit directly
                if self.persist is not None:
                    self.persist.kv["empty_due_to_threshold"] = True
                    self.persist.save()

            # candidates is a List of PredictionResult. The length of the List is equal to the number of rules
            for cand_set in expansion_result["transformations"]:
                if cand_set:
                    # cand_set is a PredictionResult object that can consist of multiple candidate reactions
                    for cand in cand_set:
                        cand_nodes = []
                        # candidate reactions can have multiple fragments
                        for c in cand:
                            if c not in self.smiles_to_node:
                                # For new nodes do an AppDomain Assessment if an AppDomain is attached
                                app_domain_assessment = None
                                if self.prediction_setting.model:
                                    if self.prediction_setting.model.app_domain:
                                        app_domain_assessment = (
                                            self.prediction_setting.model.app_domain.assess(c)
                                        )
                                snode = SNode(c, sub.depth + 1, app_domain_assessment)
                                self.smiles_to_node[c] = snode
                                new_nodes.append(snode)

                            node = self.smiles_to_node[c]
                            cand_nodes.append(node)

                        edge = SEdge(
                            sub,
                            cand_nodes,
                            rule=cand_set.rule,
                            probability=cand_set.probability,
                        )
                        self.edges.add(edge)
                        new_edges.append(edge)

        return new_nodes, new_edges

    def predict(self):
        """
        Predicts outcomes based on a graph traversal algorithm using the specified expansion schema.

        This method iteratively explores the nodes of a graph starting from the root nodes, propagating
        probabilities through edges, and updating the probabilities of the connected nodes. The traversal
        can follow one of three predefined expansion schemas: Depth-First Search (DFS), Breadth-First Search
        (BFS), or a Greedy approach based on node probabilities. The methodology ensures that all reachable
        nodes are processed systematically according to the specified schema.

        Errors will be raised if the expansion schema is undefined or invalid. Additionally, this method
        supports persisting changes by writing back data to the database when configured to do so.

        Attributes
        ----------
        done : bool
            A flag indicating whether the prediction process is completed.
        persist : Any
            An optional object that manages persistence operations for saving modifications.
        root_nodes : List[SNode]
            A collection of initial nodes in the graph from which traversal begins.
        prediction_setting : Any
            Configuration object specifying settings for graph traversal, such as the choice of
            expansion schema.

        Raises
        ------
        ValueError
            If an invalid or unknown expansion schema is provided in `prediction_setting`.
        """
        # populate initial queue
        queue = list(self.root_nodes)
        processed = set()

        # initial nodes have prob 1.0
        node_probs: Dict[SNode, float] = {}
        node_probs.update({n: 1.0 for n in queue})

        while queue:
            current = queue.pop(0)

            if current in processed:
                continue

            processed.add(current)

            new_nodes, new_edges = self._expand([current])

            if new_nodes or new_edges:
                # Check if we need to write back data to the database
                if self.persist:
                    self._sync_to_pathway()
                    # call save to update the internal modified field
                    self.persist.save()

            if new_nodes:
                for edge in new_edges:
                    # All edges have `current` as educt
                    # Use `current` and adjust probs
                    current_prob = node_probs[current]

                    for prod in edge.products:
                        # Either a new product, or one for which we found a path with a higher prob
                        if (
                            prod not in node_probs
                            or current_prob * edge.probability > node_probs[prod]
                        ):
                            node_probs[prod] = current_prob * edge.probability

                # Update Queue to proceed
                if self.prediction_setting.expansion_scheme == "DFS":
                    for n in new_nodes:
                        if n not in processed:
                            # We want to follow this path -> prepend queue
                            queue.insert(0, n)
                elif self.prediction_setting.expansion_scheme == "BFS":
                    for n in new_nodes:
                        if n not in processed:
                            # Add at the end, everything queued before will be processed
                            # before new_nodes
                            queue.append(n)
                elif self.prediction_setting.expansion_scheme == "GREEDY":
                    # Simply add them, as we will re-order the queue later
                    for n in new_nodes:
                        if n not in processed:
                            queue.append(n)

                    node_and_probs = []
                    for queued_val in queue:
                        node_and_probs.append((queued_val, node_probs[queued_val]))

                    # re-order the queue by descending probability and keep only the nodes
                    queue = [
                        n[0] for n in sorted(node_and_probs, key=lambda x: x[1], reverse=True)
                    ]
                else:
                    raise ValueError(
                        f"Unknown expansion schema: {self.prediction_setting.expansion_scheme}"
                    )

        # Queue exhausted, we're done
        self.done = True

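To make the GREEDY branch concrete: after each expansion the whole queue is re-sorted by the best accumulated probability, so the most promising unexplored node is always popped next. A standalone sketch with illustrative numbers:

# Illustrative greedy re-ordering (values made up):
node_probs = {"A": 1.0, "B": 0.72, "C": 0.9}  # best path probability per node
queue = ["A", "B", "C"]
queue = [
    n for n, _ in sorted(
        ((n, node_probs[n]) for n in queue), key=lambda x: x[1], reverse=True
    )
]
# -> ["A", "C", "B"]: "C" is expanded before "B" despite being queued later
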
    def predict_step(self, from_depth: int = None, from_node: "Node" = None):
        substrates: List[SNode] = []

@ -1802,15 +1533,63 @@ class SPathway(object):
                if from_node == v:
                    substrates = [k]
                    break
            else:
                raise ValueError(f"Node {from_node} not found in SPathway!")
        else:
            raise ValueError("Neither from_depth nor from_node_url specified")

        new_tp = False
        if substrates:
            new_nodes, _ = self._expand(substrates)
            new_tp = len(new_nodes) > 0
        for sub in substrates:
            if sub.app_domain_assessment is None:
                if self.prediction_setting.model:
                    if self.prediction_setting.model.app_domain:
                        app_domain_assessment = self.prediction_setting.model.app_domain.assess(sub.smiles)

                        if self.persist is not None:
                            n = self.snode_persist_lookup[sub]

                            assert n.id is not None, (
                                "Node has no id! Should have been saved already... aborting!"
                            )
                            node_data = n.simple_json()
                            node_data["image"] = f"{n.url}?image=svg"
                            app_domain_assessment["assessment"]["node"] = node_data

                            n.kv["app_domain_assessment"] = app_domain_assessment
                            n.save()

                        sub.app_domain_assessment = app_domain_assessment

            candidates = self.prediction_setting.expand(self, sub)
            # candidates is a List of PredictionResult. The length of the List is equal to the number of rules
            for cand_set in candidates:
                if cand_set:
                    new_tp = True
                    # cand_set is a PredictionResult object that can consist of multiple candidate reactions
                    for cand in cand_set:
                        cand_nodes = []
                        # candidate reactions can have multiple fragments
                        for c in cand:
                            if c not in self.smiles_to_node:
                                # For new nodes do an AppDomain Assessment if an AppDomain is attached
                                app_domain_assessment = None
                                if self.prediction_setting.model:
                                    if self.prediction_setting.model.app_domain:
                                        app_domain_assessment = (self.prediction_setting.model.app_domain.assess(c))

                                self.smiles_to_node[c] = SNode(
                                    c, sub.depth + 1, app_domain_assessment
                                )

                            node = self.smiles_to_node[c]
                            cand_nodes.append(node)

                        edge = SEdge(
                            sub,
                            cand_nodes,
                            rule=cand_set.rule,
                            probability=cand_set.probability,
                        )
                        self.edges.add(edge)

        # In case no substrates are found, we're done.
        # For "predict from node" we're always done
@ -1823,14 +1602,6 @@ class SPathway(object):
            # call save to update the internal modified field
            self.persist.save()

    def get_edge_for_educt_smiles(self, smiles: str) -> List[SEdge]:
        res = []
        for e in self.edges:
            for n in e.educts:
                if n.smiles == smiles:
                    res.append(e)
        return res

    def _sync_to_pathway(self) -> None:
        logger.info("Updating Pathway with SPathway")

@ -1894,6 +1665,11 @@ class SPathway(object):
            "to": to_indices,
        }

        # if edge.rule:
        #     e['rule'] = {
        #         'name': edge.rule.name,
        #         'id': edge.rule.url,
        #     }
        edges.append(e)

        return {
@ -8,19 +8,14 @@ from epdb.logic import UserManager, GroupManager, PackageManager, SettingManager
from epdb.models import (
    UserSettingPermission,
    MLRelativeReasoning,
    EnviFormer,
    Permission,
    User,
    ExternalDatabase,
    License,
)


class Command(BaseCommand):
    def add_arguments(self, parser):
        parser.add_argument(
            "-ol", "--only-licenses", action="store_true", help="Only create licenses."
        )

    def create_users(self):
        # Anonymous User
        if not User.objects.filter(email="anon@envipath.com").exists():
@ -88,17 +83,6 @@ class Command(BaseCommand):

        return anon, admin, g, user0

    def create_licenses(self):
        """Create the six default licenses supported by enviPath"""
        cc_strings = ["by", "by-nc", "by-nc-nd", "by-nc-sa", "by-nd", "by-sa"]
        for cc_string in cc_strings:
            if not License.objects.filter(cc_string=cc_string).exists():
                new_license = License()
                new_license.cc_string = cc_string
                new_license.link = f"https://creativecommons.org/licenses/{cc_string}/4.0/"
                new_license.image_link = f"https://licensebuttons.net/l/{cc_string}/4.0/88x31.png"
                new_license.save()

    def import_package(self, data, owner):
        return PackageManager.import_legacy_package(
            data, owner, keep_ids=True, add_import_timestamp=False, trust_reviewed=True
@ -173,10 +157,6 @@ class Command(BaseCommand):

    @transaction.atomic
    def handle(self, *args, **options):
        # Create licenses
        self.create_licenses()
        if options.get("only_licenses", False):
            return
        # Create users
        anon, admin, g, user0 = self.create_users()

@ -230,6 +210,7 @@ class Command(BaseCommand):
            package=pack,
            rule_packages=[mapping["EAWAG-BBD"]],
            data_packages=[mapping["EAWAG-BBD"]],
            eval_packages=[],
            threshold=0.5,
            name="ECC - BBD - T0.5",
            description="ML Relative Reasoning",
@ -237,3 +218,7 @@ class Command(BaseCommand):

        ml_model.build_dataset()
        ml_model.build_model()

        # If available, create EnviFormerModel
        if s.ENVIFORMER_PRESENT:
            EnviFormer.create(pack, "EnviFormer - T0.5", "EnviFormer Model with Threshold 0.5", 0.5)
@ -1,92 +0,0 @@
from django.conf import settings as s
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError

from epdb.models import APIToken


class Command(BaseCommand):
    help = "Create an API token for a user"

    def add_arguments(self, parser):
        parser.add_argument(
            "--username",
            required=True,
            help="Username of the user who will own the token",
        )
        parser.add_argument(
            "--name",
            required=True,
            help="Descriptive name for the token",
        )
        parser.add_argument(
            "--expires-days",
            type=int,
            default=90,
            help="Days until expiration (0 for no expiration)",
        )
        parser.add_argument(
            "--inactive",
            action="store_true",
            help="Create the token as inactive",
        )
        parser.add_argument(
            "--curl",
            action="store_true",
            help="Print a curl example using the token",
        )
        parser.add_argument(
            "--base-url",
            default=None,
            help="Base URL for curl example (default SERVER_URL or http://localhost:8000)",
        )
        parser.add_argument(
            "--endpoint",
            default="/api/v1/compounds/",
            help="Endpoint path for curl example",
        )

    def handle(self, *args, **options):
        username = options["username"]
        name = options["name"]
        expires_days = options["expires_days"]

        if expires_days < 0:
            raise CommandError("--expires-days must be >= 0")

        if expires_days == 0:
            expires_days = None

        user_model = get_user_model()
        try:
            user = user_model.objects.get(username=username)
        except user_model.DoesNotExist as exc:
            raise CommandError(f"User not found for username '{username}'") from exc

        token, raw_token = APIToken.create_token(user, name=name, expires_days=expires_days)

        if options["inactive"]:
            token.is_active = False
            token.save(update_fields=["is_active"])

        self.stdout.write(f"User: {user.username} ({user.email})")
        self.stdout.write(f"Token name: {token.name}")
        self.stdout.write(f"Token id: {token.id}")
        if token.expires_at:
            self.stdout.write(f"Expires at: {token.expires_at.isoformat()}")
        else:
            self.stdout.write("Expires at: never")
        self.stdout.write(f"Active: {token.is_active}")
        self.stdout.write("Raw token:")
        self.stdout.write(raw_token)

        if options["curl"]:
            base_url = (
                options["base_url"] or getattr(s, "SERVER_URL", None) or "http://localhost:8000"
            )
            endpoint = options["endpoint"]
            endpoint = endpoint if endpoint.startswith("/") else f"/{endpoint}"
            url = f"{base_url.rstrip('/')}{endpoint}"
            curl_cmd = f'curl -H "Authorization: Bearer {raw_token}" "{url}"'
            self.stdout.write("Curl:")
            self.stdout.write(curl_cmd)
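Run as a management command, a session looks roughly like the following; the command name is assumed, since the file path is not shown in this diff:

# Hypothetical invocation -- command name is an assumption.
# $ python manage.py create_api_token --username alice --name ci-token --expires-days 30 --curl
# User: alice (alice@example.com)
# Token name: ci-token
# ...
# Raw token:
# <raw-token>
# Curl:
# curl -H "Authorization: Bearer <raw-token>" "http://localhost:8000/api/v1/compounds/"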
@ -2,9 +2,7 @@ from django.conf import settings as s
from django.core.management.base import BaseCommand
from django.db import transaction

from epdb.models import EnviFormer, MLRelativeReasoning

Package = s.GET_PACKAGE_MODEL()
from epdb.models import MLRelativeReasoning, EnviFormer, Package


class Command(BaseCommand):
@ -77,13 +75,11 @@ class Command(BaseCommand):
            return packages

        # Iteratively create models in options["model_names"]
        print(
            f"Creating models: {options['model_names']}\n"
            f"Data packages: {options['data_packages']}\n"
            f"Rule Packages (only for MLRR): {options['rule_packages']}\n"
            f"Eval Packages: {options['eval_packages']}\n"
            f"Threshold: {options['threshold']:.2f}"
        )
        print(f"Creating models: {options['model_names']}\n"
              f"Data packages: {options['data_packages']}\n"
              f"Rule Packages (only for MLRR): {options['rule_packages']}\n"
              f"Eval Packages: {options['eval_packages']}\n"
              f"Threshold: {options['threshold']:.2f}")
        data_packages = decode_packages(options["data_packages"])
        eval_packages = decode_packages(options["eval_packages"])
        rule_packages = decode_packages(options["rule_packages"])
@ -93,20 +89,22 @@ class Command(BaseCommand):
            model = EnviFormer.create(
                pack,
                data_packages=data_packages,
                threshold=options["threshold"],
                eval_packages=eval_packages,
                threshold=options['threshold'],
                name=f"EnviFormer - {', '.join(options['data_packages'])} - T{options['threshold']:.2f}",
                description=f"EnviFormer transformer trained on {options['data_packages']} "
                f"evaluated on {options['eval_packages']}.",
                f"evaluated on {options['eval_packages']}.",
            )
        elif model_name == "mlrr":
            model = MLRelativeReasoning.create(
                package=pack,
                rule_packages=rule_packages,
                data_packages=data_packages,
                threshold=options["threshold"],
                eval_packages=eval_packages,
                threshold=options['threshold'],
                name=f"ECC - {', '.join(options['data_packages'])} - T{options['threshold']:.2f}",
                description=f"ML Relative Reasoning trained on {options['data_packages']} with rules from "
                f"{options['rule_packages']} and evaluated on {options['eval_packages']}.",
                f"{options['rule_packages']} and evaluated on {options['eval_packages']}.",
            )
        else:
            raise ValueError(f"Cannot create model of type {model_name}, unknown model type")

@ -47,7 +47,7 @@ class Command(BaseCommand):
            "description": model.description,
            "kv": model.kv,
            "data_packages_uuids": [str(p.uuid) for p in model.data_packages.all()],
            "eval_packages_uuids": [str(p.uuid) for p in model.eval_packages.all()],
            "eval_packages_uuids": [str(p.uuid) for p in model.data_packages.all()],
            "threshold": model.threshold,
            "eval_results": model.eval_results,
            "multigen_eval": model.multigen_eval,

@ -8,9 +8,7 @@ from django.conf import settings as s
from django.core.management.base import BaseCommand
from django.db import transaction

from epdb.models import EnviFormer

Package = s.GET_PACKAGE_MODEL()
from epdb.models import EnviFormer, Package


class Command(BaseCommand):

@ -1,8 +1,8 @@
from django.apps import apps
from django.conf import settings as s
from django.core.management.base import BaseCommand
from django.db.models import F, JSONField, TextField, Value
from django.db.models.functions import Cast, Replace

from django.db.models import F, Value, TextField, JSONField
from django.db.models.functions import Replace, Cast

from epdb.models import EnviPathModel

@ -23,12 +23,10 @@ class Command(BaseCommand):
        )

    def handle(self, *args, **options):
        Package = s.GET_PACKAGE_MODEL()
        Package.objects.update(url=Replace(F("url"), Value(options["old"]), Value(options["new"])))

        MODELS = [
            "User",
            "Group",
            "Package",
            "Compound",
            "CompoundStructure",
            "Pathway",
@ -41,12 +39,15 @@ class Command(BaseCommand):
            "SequentialRule",
            "Scenario",
            "Setting",
            "EPModel",
            "MLRelativeReasoning",
            "RuleBasedRelativeReasoning",
            "EnviFormer",
            "ApplicabilityDomain",
            "EnzymeLink",
        ]
        for model in MODELS:
            obj_cls = apps.get_model("epdb", model)
            print(f"Localizing urls for {model}")
            obj_cls.objects.update(
                url=Replace(F("url"), Value(options["old"]), Value(options["new"]))
            )
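The Replace(F("url"), Value(old), Value(new)) expression rewrites each url column in a single UPDATE per model, entirely in SQL; roughly equivalent to:

# Roughly equivalent SQL per model (table name illustrative):
#   UPDATE epdb_compound
#   SET url = REPLACE(url, 'https://envipath.org/', 'http://localhost:8000/');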
@ -1,83 +0,0 @@
import os
import subprocess

from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand


class Command(BaseCommand):
    def add_arguments(self, parser):
        parser.add_argument(
            "-n",
            "--name",
            type=str,
            help="Name of the database to recreate. Default is 'appdb'",
            default="appdb",
        )

        parser.add_argument(
            "-d",
            "--dump",
            type=str,
            help="Path to the dump file",
            default="./fixtures/db.dump",
        )

        parser.add_argument(
            "-ou",
            "--oldurl",
            type=str,
            help="Old URL, e.g. https://envipath.org/",
            default="https://envipath.org/",
        )

        parser.add_argument(
            "-nu",
            "--newurl",
            type=str,
            help="New URL, e.g. http://localhost:8000/",
            default="http://localhost:8000/",
        )

    def handle(self, *args, **options):
        dump_file = options["dump"]

        if not os.path.exists(dump_file):
            raise ValueError(f"Dump file {dump_file} does not exist")

        db_name = options["name"]

        print(f"Dropping database {db_name} y/n: ", end="")

        # Note: checking membership in the string "yY" would treat empty input
        # as confirmation; compare against explicit values instead.
        if input().strip() in ("y", "Y"):
            result = subprocess.run(
                ["dropdb", db_name],
                capture_output=True,
                text=True,
            )
            print(result.stdout)
        else:
            raise ValueError("Aborted")

        print(f"Creating database {db_name}")

        result = subprocess.run(
            ["createdb", db_name],
            capture_output=True,
            text=True,
        )
        print(result.stdout)
        print(f"Restoring database {db_name} from {dump_file}")

        result = subprocess.run(
            ["pg_restore", "-d", db_name, dump_file, "--no-owner"],
            capture_output=True,
            text=True,
        )
        print(result.stdout)

        if db_name == settings.DATABASES["default"]["NAME"]:
            call_command("localize_urls", "--old", options["oldurl"], "--new", options["newurl"])
        else:
            print("Skipping localize_urls as database is not the default one.")
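A typical run against the default database, using the command's own defaults; the command name itself is assumed:

# Hypothetical invocation -- command name is an assumption.
# $ python manage.py recreate_db --name appdb --dump ./fixtures/db.dump \
#       --oldurl https://envipath.org/ --newurl http://localhost:8000/
# Drops and recreates appdb, restores it via pg_restore, then rewrites URLs
# through localize_urls when appdb is the default database.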
@ -1,18 +0,0 @@
# Generated by Django 5.2.7 on 2025-11-11 14:11

from django.db import migrations, models


class Migration(migrations.Migration):
    dependencies = [
        ("epdb", "0009_joblog"),
    ]

    operations = [
        migrations.AddField(
            model_name="license",
            name="cc_string",
            field=models.TextField(default="by-nc-sa", verbose_name="CC string"),
            preserve_default=False,
        ),
    ]
@ -1,59 +0,0 @@
# Generated by Django 5.2.7 on 2025-11-11 14:13

import re

from django.contrib.postgres.aggregates import ArrayAgg
from django.db import migrations
from django.db.models import Min


def set_cc(apps, schema_editor):
    License = apps.get_model("epdb", "License")

    # For all existing licenses extract cc_string from link
    for license in License.objects.all():
        pattern = r"/licenses/([^/]+)/4\.0"
        match = re.search(pattern, license.link)
        if match:
            license.cc_string = match.group(1)
            license.save()
        else:
            raise ValueError(f"Could not find license for {license.link}")

    # Ensure we have all licenses
    cc_strings = ["by", "by-nc", "by-nc-nd", "by-nc-sa", "by-nd", "by-sa"]
    for cc_string in cc_strings:
        if not License.objects.filter(cc_string=cc_string).exists():
            new_license = License()
            new_license.cc_string = cc_string
            new_license.link = f"https://creativecommons.org/licenses/{cc_string}/4.0/"
            new_license.image_link = f"https://licensebuttons.net/l/{cc_string}/4.0/88x31.png"
            new_license.save()

    # As we might have existing Licenses representing the same License,
    # get min pk and all pks as a list
    license_lookup_qs = License.objects.values("cc_string").annotate(
        lowest_pk=Min("id"), all_pks=ArrayAgg("id", order_by=("id",))
    )

    license_lookup = {
        row["cc_string"]: (row["lowest_pk"], row["all_pks"]) for row in license_lookup_qs
    }

    Packages = apps.get_model("epdb", "Package")

    for k, v in license_lookup.items():
        # Set min pk to all packages pointing to any of the duplicates
        Packages.objects.filter(pk__in=v[1]).update(license_id=v[0])
        # remove the min pk from "other" pks as we use them for deletion
        v[1].remove(v[0])
        # Delete redundant License objects
        License.objects.filter(pk__in=v[1]).delete()


class Migration(migrations.Migration):
    dependencies = [
        ("epdb", "0010_license_cc_string"),
    ]

    operations = [migrations.RunPython(set_cc)]
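To illustrate the dedup step in set_cc: license_lookup maps each cc_string to its lowest pk together with all pks carrying that string, packages are re-pointed at the survivor, and the remaining duplicates are deleted. With made-up pks:

# Illustrative shape of license_lookup:
# {"by-nc-sa": (3, [3, 7, 12]), "by": (1, [1]), ...}
# Packages with license_id in [3, 7, 12] are updated to license_id=3,
# then License rows 7 and 12 are deleted.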
@ -1,22 +0,0 @@
|
||||
# Generated by Django 5.2.7 on 2025-12-02 13:09
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
("epdb", "0011_auto_20251111_1413"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name="node",
|
||||
name="stereo_removed",
|
||||
field=models.BooleanField(default=False),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name="pathway",
|
||||
name="predicted",
|
||||
field=models.BooleanField(default=False),
|
||||
),
|
||||
]
|
||||
@ -1,25 +0,0 @@
|
||||
# Generated by Django 5.2.7 on 2025-12-14 11:30
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
("epdb", "0012_node_stereo_removed_pathway_predicted"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name="setting",
|
||||
name="expansion_schema",
|
||||
field=models.CharField(
|
||||
choices=[
|
||||
("BFS", "Breadth First Search"),
|
||||
("DFS", "Depth First Search"),
|
||||
("GREEDY", "Greedy"),
|
||||
],
|
||||
default="BFS",
|
||||
max_length=20,
|
||||
),
|
||||
),
|
||||
]
|
||||
@ -1,17 +0,0 @@
|
||||
# Generated by Django 5.2.7 on 2025-12-14 16:02
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
("epdb", "0013_setting_expansion_schema"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RenameField(
|
||||
model_name="setting",
|
||||
old_name="expansion_schema",
|
||||
new_name="expansion_scheme",
|
||||
),
|
||||
]
|
||||
@ -1,17 +0,0 @@
|
||||
# Generated by Django 5.2.7 on 2026-01-19 19:26
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
("epdb", "0014_rename_expansion_schema_setting_expansion_scheme"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name="user",
|
||||
name="is_reviewer",
|
||||
field=models.BooleanField(default=False),
|
||||
),
|
||||
]
|
||||
@ -1,179 +0,0 @@
|
||||
# Generated by Django 5.2.7 on 2026-02-12 09:38

import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models


class Migration(migrations.Migration):
    dependencies = [
        ("epdb", "0015_user_is_reviewer"),
    ]

    operations = [
        migrations.RemoveField(
            model_name="enviformer",
            name="model_status",
        ),
        migrations.RemoveField(
            model_name="mlrelativereasoning",
            name="model_status",
        ),
        migrations.RemoveField(
            model_name="rulebasedrelativereasoning",
            name="model_status",
        ),
        migrations.AddField(
            model_name="epmodel",
            name="model_status",
            field=models.CharField(
                choices=[
                    ("INITIAL", "Initial"),
                    ("INITIALIZING", "Model is initializing."),
                    ("BUILDING", "Model is building."),
                    (
                        "BUILT_NOT_EVALUATED",
                        "Model is built and can be used for predictions, Model is not evaluated yet.",
                    ),
                    ("EVALUATING", "Model is evaluating"),
                    ("FINISHED", "Model has finished building and evaluation."),
                    ("ERROR", "Model has failed."),
                ],
                default="INITIAL",
            ),
        ),
        migrations.AlterField(
            model_name="enviformer",
            name="eval_packages",
            field=models.ManyToManyField(
                blank=True,
                related_name="%(app_label)s_%(class)s_eval_packages",
                to=settings.EPDB_PACKAGE_MODEL,
                verbose_name="Evaluation Packages",
            ),
        ),
        migrations.AlterField(
            model_name="enviformer",
            name="rule_packages",
            field=models.ManyToManyField(
                blank=True,
                related_name="%(app_label)s_%(class)s_rule_packages",
                to=settings.EPDB_PACKAGE_MODEL,
                verbose_name="Rule Packages",
            ),
        ),
        migrations.AlterField(
            model_name="mlrelativereasoning",
            name="eval_packages",
            field=models.ManyToManyField(
                blank=True,
                related_name="%(app_label)s_%(class)s_eval_packages",
                to=settings.EPDB_PACKAGE_MODEL,
                verbose_name="Evaluation Packages",
            ),
        ),
        migrations.AlterField(
            model_name="mlrelativereasoning",
            name="rule_packages",
            field=models.ManyToManyField(
                blank=True,
                related_name="%(app_label)s_%(class)s_rule_packages",
                to=settings.EPDB_PACKAGE_MODEL,
                verbose_name="Rule Packages",
            ),
        ),
        migrations.AlterField(
            model_name="rulebasedrelativereasoning",
            name="eval_packages",
            field=models.ManyToManyField(
                blank=True,
                related_name="%(app_label)s_%(class)s_eval_packages",
                to=settings.EPDB_PACKAGE_MODEL,
                verbose_name="Evaluation Packages",
            ),
        ),
        migrations.AlterField(
            model_name="rulebasedrelativereasoning",
            name="rule_packages",
            field=models.ManyToManyField(
                blank=True,
                related_name="%(app_label)s_%(class)s_rule_packages",
                to=settings.EPDB_PACKAGE_MODEL,
                verbose_name="Rule Packages",
            ),
        ),
        migrations.CreateModel(
            name="PropertyPluginModel",
            fields=[
                (
                    "epmodel_ptr",
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        serialize=False,
                        to="epdb.epmodel",
                    ),
                ),
                ("threshold", models.FloatField(default=0.5)),
                ("eval_results", models.JSONField(blank=True, default=dict, null=True)),
                ("multigen_eval", models.BooleanField(default=False)),
                ("plugin_identifier", models.CharField(max_length=255)),
                (
                    "app_domain",
                    models.ForeignKey(
                        blank=True,
                        default=None,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        to="epdb.applicabilitydomain",
                    ),
                ),
                (
                    "data_packages",
                    models.ManyToManyField(
                        blank=True,
                        related_name="%(app_label)s_%(class)s_data_packages",
                        to=settings.EPDB_PACKAGE_MODEL,
                        verbose_name="Data Packages",
                    ),
                ),
                (
                    "eval_packages",
                    models.ManyToManyField(
                        blank=True,
                        related_name="%(app_label)s_%(class)s_eval_packages",
                        to=settings.EPDB_PACKAGE_MODEL,
                        verbose_name="Evaluation Packages",
                    ),
                ),
                (
                    "rule_packages",
                    models.ManyToManyField(
                        blank=True,
                        related_name="%(app_label)s_%(class)s_rule_packages",
                        to=settings.EPDB_PACKAGE_MODEL,
                        verbose_name="Rule Packages",
                    ),
                ),
            ],
            options={
                "abstract": False,
            },
            bases=("epdb.epmodel",),
        ),
        migrations.AddField(
            model_name="setting",
            name="property_models",
            field=models.ManyToManyField(
                blank=True,
                related_name="settings",
                to="epdb.propertypluginmodel",
                verbose_name="Setting Property Models",
            ),
        ),
        migrations.DeleteModel(
            name="PluginModel",
        ),
    ]
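With the status field consolidated onto the `EPModel` base, a single update covers EnviFormer, MLRelativeReasoning, and RuleBasedRelativeReasoning alike. A minimal sketch, assuming `epdb.models.EPModel` matches the state created by this migration:

# Minimal sketch, not project code: update the consolidated status field.
from epdb.models import EPModel

def mark_building(model_pk: int) -> None:
    # Subclasses inherit model_status from the EPModel base table,
    # so one update works for every model type.
    EPModel.objects.filter(pk=model_pk).update(model_status="BUILDING")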
@ -1,93 +0,0 @@
# Generated by Django 5.2.7 on 2026-02-20 12:02

import django.db.models.deletion
import uuid
from django.conf import settings
from django.db import migrations, models


class Migration(migrations.Migration):
    dependencies = [
        ("contenttypes", "0002_remove_content_type_name"),
        ("epdb", "0016_remove_enviformer_model_status_and_more"),
    ]

    operations = [
        migrations.CreateModel(
            name="AdditionalInformation",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
                    ),
                ),
                ("uuid", models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
                ("url", models.TextField(null=True, unique=True, verbose_name="URL")),
                ("kv", models.JSONField(blank=True, default=dict, null=True)),
                ("type", models.TextField(verbose_name="Additional Information Type")),
                ("data", models.JSONField(blank=True, default=dict, null=True)),
                ("object_id", models.PositiveBigIntegerField(blank=True, null=True)),
                (
                    "content_type",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to="contenttypes.contenttype",
                    ),
                ),
                (
                    "package",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.EPDB_PACKAGE_MODEL,
                        verbose_name="Package",
                    ),
                ),
                (
                    "scenario",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="scenario_additional_information",
                        to="epdb.scenario",
                    ),
                ),
            ],
            options={
                "indexes": [
                    models.Index(fields=["type"], name="epdb_additi_type_394349_idx"),
                    models.Index(
                        fields=["scenario", "type"], name="epdb_additi_scenari_a59edf_idx"
                    ),
                    models.Index(
                        fields=["content_type", "object_id"], name="epdb_additi_content_44d4b4_idx"
                    ),
                    models.Index(
                        fields=["scenario", "content_type", "object_id"],
                        name="epdb_additi_scenari_ef2bf5_idx",
                    ),
                ],
                "constraints": [
                    models.CheckConstraint(
                        condition=models.Q(
                            models.Q(("content_type__isnull", True), ("object_id__isnull", True)),
                            models.Q(("content_type__isnull", False), ("object_id__isnull", False)),
                            _connector="OR",
                        ),
                        name="ck_addinfo_gfk_pair",
                    ),
                    models.CheckConstraint(
                        condition=models.Q(
                            ("scenario__isnull", False),
                            ("content_type__isnull", False),
                            _connector="OR",
                        ),
                        name="ck_addinfo_not_both_null",
                    ),
                ],
            },
        ),
    ]
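The two check constraints encode that `content_type` and `object_id` must be set or unset together, and that every row is attached to at least a scenario or a content object. A minimal query sketch under those rules (import paths assumed):

# Sketch: fetch AdditionalInformation attached to a given node via the
# generic foreign key created above. Import paths are assumptions.
from django.contrib.contenttypes.models import ContentType
from epdb.models import AdditionalInformation, Node

def info_for_node(node: Node):
    ct = ContentType.objects.get_for_model(Node)
    # Valid per ck_addinfo_gfk_pair: content_type and object_id set together.
    return AdditionalInformation.objects.filter(content_type=ct, object_id=node.pk)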
@ -1,132 +0,0 @@
# Generated by Django 5.2.7 on 2026-02-20 12:03

from django.db import migrations


def get_additional_information(scenario):
    from envipy_additional_information import registry
    from envipy_additional_information.parsers import TypeOfAerationParser

    # By default, additional fields are ignored.
    MAPPING = {c.__name__: c for c in registry.list_models().values()}

    for k, vals in scenario.additional_information.items():
        if k == "enzyme":
            continue

        # Map legacy/misspelled keys onto their current names.
        if k == "SpikeConentration":
            k = "SpikeConcentration"

        if k == "AerationType":
            k = "TypeOfAeration"

        for v in vals:
            try:
                inst = MAPPING[k](**v)
            except Exception:
                # Only TypeOfAeration has a string-parsing fallback; anything
                # else should fail loudly instead of yielding a stale instance.
                if k == "TypeOfAeration":
                    toa = TypeOfAerationParser()
                    inst = toa.from_string(v["type"])
                else:
                    raise

            # Add uuid to uniquely identify objects for manipulation.
            if "uuid" in v:
                inst.__dict__["uuid"] = v["uuid"]

            yield inst


def forward_func(apps, schema_editor):
    Scenario = apps.get_model("epdb", "Scenario")
    ContentType = apps.get_model("contenttypes", "ContentType")
    AdditionalInformation = apps.get_model("epdb", "AdditionalInformation")

    bulk = []
    related = []
    ctype = {o.model: o for o in ContentType.objects.all()}
    parents = Scenario.objects.prefetch_related(
        "compound_set",
        "compoundstructure_set",
        "reaction_set",
        "rule_set",
        "pathway_set",
        "node_set",
        "edge_set",
    ).filter(parent__isnull=True)

    for i, scenario in enumerate(parents):
        print(f"{i + 1}/{len(parents)}", end="\r")
        if scenario.parent is not None:
            related.append(scenario.parent)
            continue

        for ai in get_additional_information(scenario):
            bulk.append(
                AdditionalInformation(
                    package=scenario.package,
                    scenario=scenario,
                    type=ai.__class__.__name__,
                    data=ai.model_dump(mode="json"),
                )
            )

    print("\n", len(bulk))

    related = Scenario.objects.prefetch_related(
        "compound_set",
        "compoundstructure_set",
        "reaction_set",
        "rule_set",
        "pathway_set",
        "node_set",
        "edge_set",
    ).filter(parent__isnull=False)

    for i, scenario in enumerate(related):
        print(f"{i + 1}/{len(related)}", end="\r")
        parent = scenario.parent
        # Check which objects this scenario is attached to.
        rel_objs = [
            "compound",
            "compoundstructure",
            "reaction",
            "rule",
            "pathway",
            "node",
            "edge",
        ]
        for ai in get_additional_information(scenario):
            for rel_obj in rel_objs:
                for o in getattr(scenario, f"{rel_obj}_set").all():
                    bulk.append(
                        AdditionalInformation(
                            package=scenario.package,
                            scenario=parent,
                            type=ai.__class__.__name__,
                            data=ai.model_dump(mode="json"),
                            content_type=ctype[rel_obj],
                            object_id=o.pk,
                        )
                    )

    print("Start creating additional information objects...")
    AdditionalInformation.objects.bulk_create(bulk)
    print("Done!")
    print(len(bulk))

    Scenario.objects.filter(parent__isnull=False).delete()
    # Call save() on each AdditionalInformation row to fix URLs.
    ais = AdditionalInformation.objects.all()
    total = ais.count()

    for i, ai in enumerate(ais):
        print(f"{i + 1}/{total}", end="\r")
        ai.save()


class Migration(migrations.Migration):
    dependencies = [
        ("epdb", "0017_additionalinformation"),
    ]

    operations = [
        migrations.RunPython(forward_func, reverse_code=migrations.RunPython.noop),
    ]
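Because the reverse step is a no-op, it can help to sanity-check the copy before the old fields are dropped in 0019. A minimal check, assuming a Django shell:

# Minimal sanity check after running this data migration (assumed shell session).
from epdb.models import AdditionalInformation

# Rows attached directly to a scenario vs. rows attached via the generic FK.
print(AdditionalInformation.objects.filter(content_type__isnull=True).count())
print(AdditionalInformation.objects.filter(content_type__isnull=False).count())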
@ -1,20 +0,0 @@
# Generated by Django 5.2.7 on 2026-02-23 08:45

from django.db import migrations


class Migration(migrations.Migration):
    dependencies = [
        ("epdb", "0018_auto_20260220_1203"),
    ]

    operations = [
        migrations.RemoveField(
            model_name="scenario",
            name="additional_information",
        ),
        migrations.RemoveField(
            model_name="scenario",
            name="parent",
        ),
    ]
@ -1,65 +0,0 @@
# Generated by Django 5.2.7 on 2026-03-09 10:41

import django.db.models.deletion
from django.db import migrations, models


def populate_polymorphic_ctype(apps, schema_editor):
    ContentType = apps.get_model("contenttypes", "ContentType")
    Compound = apps.get_model("epdb", "Compound")
    CompoundStructure = apps.get_model("epdb", "CompoundStructure")

    # Update Compound records
    compound_ct = ContentType.objects.get_for_model(Compound)
    Compound.objects.filter(polymorphic_ctype__isnull=True).update(polymorphic_ctype=compound_ct)

    # Update CompoundStructure records
    compound_structure_ct = ContentType.objects.get_for_model(CompoundStructure)
    CompoundStructure.objects.filter(polymorphic_ctype__isnull=True).update(
        polymorphic_ctype=compound_structure_ct
    )


def reverse_populate_polymorphic_ctype(apps, schema_editor):
    Compound = apps.get_model("epdb", "Compound")
    CompoundStructure = apps.get_model("epdb", "CompoundStructure")

    Compound.objects.all().update(polymorphic_ctype=None)
    CompoundStructure.objects.all().update(polymorphic_ctype=None)


class Migration(migrations.Migration):
    dependencies = [
        ("contenttypes", "0002_remove_content_type_name"),
        ("epdb", "0019_remove_scenario_additional_information_and_more"),
    ]

    operations = [
        migrations.AlterModelOptions(
            name="compoundstructure",
            options={"base_manager_name": "objects"},
        ),
        migrations.AddField(
            model_name="compound",
            name="polymorphic_ctype",
            field=models.ForeignKey(
                editable=False,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="polymorphic_%(app_label)s.%(class)s_set+",
                to="contenttypes.contenttype",
            ),
        ),
        migrations.AddField(
            model_name="compoundstructure",
            name="polymorphic_ctype",
            field=models.ForeignKey(
                editable=False,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="polymorphic_%(app_label)s.%(class)s_set+",
                to="contenttypes.contenttype",
            ),
        ),
        migrations.RunPython(populate_polymorphic_ctype, reverse_populate_polymorphic_ctype),
    ]
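The `polymorphic_ctype` column is what django-polymorphic uses to downcast query results; once it is backfilled, base-model queries return the concrete subclass. A hedged sketch of the expected behaviour (model names come from the migration, everything else is illustrative):

# Sketch: with polymorphic_ctype populated, querying the base model
# yields concrete subclass instances (django-polymorphic behaviour).
from epdb.models import Compound

for obj in Compound.objects.all()[:5]:
    # CompoundStructure rows come back as CompoundStructure, not Compound.
    print(type(obj).__name__, obj.pk)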
1542  epdb/models.py
File diff suppressed because it is too large

232  epdb/tasks.py
@ -1,34 +1,19 @@
import csv
import io
import logging
from datetime import datetime
from typing import Any, Callable, List, Optional
from uuid import uuid4

from celery import shared_task
from celery.utils.functional import LRUCache
from django.conf import settings as s
from django.core.mail import EmailMultiAlternatives
from django.utils import timezone

from epdb.logic import SPathway
from epdb.models import (
    AdditionalInformation,
    Edge,
    EPModel,
    JobLog,
    Node,
    Pathway,
    Rule,
    Setting,
    User,
)
from utilities.chem import FormatConverter
from epdb.models import EPModel, JobLog, Node, Package, Pathway, Rule, Setting, User, Edge

logger = logging.getLogger(__name__)
ML_CACHE = LRUCache(3)  # Cache the three most recent ML models to reduce load times.

Package = s.GET_PACKAGE_MODEL()


def get_ml_model(model_pk: int):
    if model_pk not in ML_CACHE:
@ -44,11 +29,11 @@ def dispatch_eager(user: "User", job: Callable, *args, **kwargs):
        log.task_id = uuid4()
        log.job_name = job.__name__
        log.status = "SUCCESS"
        log.done_at = timezone.now()
        log.done_at = datetime.now()
        log.task_result = str(x) if x else None
        log.save()

        return log, x
        return x
    except Exception as e:
        logger.exception(e)
        raise e
@ -64,7 +49,7 @@ def dispatch(user: "User", job: Callable, *args, **kwargs):
        log.status = "INITIAL"
        log.save()

        return log
        return x.result
    except Exception as e:
        logger.exception(e)
        raise e
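`dispatch` hands the job to Celery and tracks it via a `JobLog`, while `dispatch_eager` runs it inline. A minimal usage sketch (a `User` instance is assumed, and the exact return values differ between the two revisions shown above):

# Hypothetical usage of the dispatch helpers above.
from epdb.tasks import dispatch, dispatch_eager, predict_simple

def run_prediction(user, model_pk: int, smiles: str):
    # Queue the Celery task; a JobLog is persisted for tracking.
    dispatch(user, predict_simple, model_pk, smiles)
    # Or run inline (useful in tests); returns the result directly.
    return dispatch_eager(user, predict_simple, model_pk, smiles)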
@ -76,39 +61,15 @@ def mul(a, b):


@shared_task(queue="predict")
def predict_simple(model_pk: int, smiles: str, *args, **kwargs):
def predict_simple(model_pk: int, smiles: str):
    mod = get_ml_model(model_pk)
    res = mod.predict(smiles, *args, **kwargs)
    res = mod.predict(smiles)
    return res


@shared_task(queue="background")
def send_registration_mail(user_pk: int):
    u = User.objects.get(id=user_pk)

    tpl = """Welcome {username}!,

Thank you for your interest in enviPath.

The public system is intended for non-commercial use only.
We will review your account details and usually activate your account within 24 hours.
Once activated, you will be notified by email.

If we have any questions, we will contact you at this email address.

Best regards,

enviPath team"""

    msg = EmailMultiAlternatives(
        "Your enviPath account",
        tpl.format(username=u.username),
        "admin@envipath.org",
        [u.email],
        bcc=["admin@envipath.org"],
    )

    msg.send(fail_silently=False)
    pass


@shared_task(bind=True, queue="model")
@ -175,25 +136,14 @@ def predict(
    pred_setting_pk: int,
    limit: Optional[int] = None,
    node_pk: Optional[int] = None,
    setting_overrides: Optional[dict] = None,
) -> Pathway:
    pw = Pathway.objects.get(id=pw_pk)
    setting = Setting.objects.get(id=pred_setting_pk)

    if setting_overrides:
        for k, v in setting_overrides.items():
            setattr(setting, k, v)

    # If the setting has a model, add/restore it from the cache.
    if setting.model is not None:
        setting.model = get_ml_model(setting.model.pk)

    kv = {"status": "running"}

    if setting_overrides:
        kv["setting_overrides"] = setting_overrides

    pw.kv.update(**kv)
    pw.kv.update(**{"status": "running"})
    pw.save()

    if JobLog.objects.filter(task_id=self.request.id).exists():
@ -218,12 +168,10 @@ def predict(
            spw = SPathway.from_pathway(pw)
            spw.predict_step(from_node=n)
        else:
            spw = SPathway(prediction_setting=setting, persist=pw)
            spw.predict()
            raise ValueError("Neither limit nor node_pk given!")

    except Exception as e:
        pw.kv.update({"status": "failed"})
        pw.kv.update(**{"error": str(e)})
        pw.save()

        if JobLog.objects.filter(task_id=self.request.id).exists():
@ -239,28 +187,9 @@ def predict(
    if JobLog.objects.filter(task_id=self.request.id).exists():
        JobLog.objects.filter(task_id=self.request.id).update(status="SUCCESS", task_result=pw.url)

    # Dispatch the property computation job.
    compute_properties.delay(pw_pk, pred_setting_pk)

    return pw.url


@shared_task(bind=True, queue="background")
def compute_properties(self, pathway_pk: int, setting_pk: int):
    pw = Pathway.objects.get(id=pathway_pk)
    setting = Setting.objects.get(id=setting_pk)

    nodes = [n for n in pw.nodes]
    smiles = [n.default_node_label.smiles for n in nodes]

    for prop_mod in setting.property_models.all():
        if prop_mod.instance().is_heavy():
            rr = prop_mod.predict_batch(smiles)
            for idx, pred in enumerate(rr.result):
                n = nodes[idx]
                _ = AdditionalInformation.create(pw.package, ai=pred, content_object=n)


@shared_task(bind=True, queue="background")
def identify_missing_rules(
    self,
@ -352,144 +281,3 @@ def identify_missing_rules(
    buffer.seek(0)

    return buffer.getvalue()


@shared_task(bind=True, queue="background")
def engineer_pathways(self, pw_pks: List[int], setting_pk: int, target_package_pk: int):
    from utilities.misc import PathwayUtils

    setting = Setting.objects.get(pk=setting_pk)
    # Temporarily set model_threshold to 0.0 to keep all TPs (transformation products).
    setting.model_threshold = 0.0

    target = Package.objects.get(pk=target_package_pk)

    intermediate_pathways = []
    predicted_pathways = []

    for pw in Pathway.objects.filter(pk__in=pw_pks):
        pu = PathwayUtils(pw)

        eng_pw, node_to_snode_mapping, intermediates = pu.engineer(setting)

        # If we've found intermediates, do the following:
        # - Get a copy of the original pathway and add the intermediates.
        # - Store the predicted pathway for further investigation.
        if len(intermediates):
            copy_mapping = {}
            copied_pw = pw.copy(target, copy_mapping)
            copied_pw.name = f"{copied_pw.name} (Engineered)"
            copied_pw.description = f"The original Pathway can be found here: {pw.url}"
            copied_pw.save()

            for inter in intermediates:
                start = copy_mapping[inter[0]]
                end = copy_mapping[inter[1]]
                start_snode = inter[2]
                end_snode = inter[3]
                for idx, intermediate_edge in enumerate(inter[4]):
                    smiles_to_node = {}

                    snodes_to_create = list(
                        set(intermediate_edge.educts + intermediate_edge.products)
                    )

                    for snode in snodes_to_create:
                        if snode == start_snode or snode == end_snode:
                            smiles_to_node[snode.smiles] = start if snode == start_snode else end
                            continue

                        if snode.smiles not in smiles_to_node:
                            n = Node.create(copied_pw, smiles=snode.smiles, depth=snode.depth)
                            # Used in viz to highlight intermediates.
                            n.kv.update({"is_engineered_intermediate": True})
                            n.save()
                            smiles_to_node[snode.smiles] = n

                    Edge.create(
                        copied_pw,
                        [smiles_to_node[educt.smiles] for educt in intermediate_edge.educts],
                        [smiles_to_node[product.smiles] for product in intermediate_edge.products],
                        rule=intermediate_edge.rule,
                    )

            # Persist the predicted pathway.
            pred_pw = pu.spathway_to_pathway(target, eng_pw, name=f"{pw.name} (Predicted)")

            intermediate_pathways.append(copied_pw.url)
            predicted_pathways.append(pred_pw.url)

    return intermediate_pathways, predicted_pathways


@shared_task(bind=True, queue="background")
def batch_predict(
    self,
    substrates: List[str] | List[List[str]],
    prediction_setting_pk: int,
    target_package_pk: int,
    num_tps: int = 50,
):
    target_package = Package.objects.get(pk=target_package_pk)
    prediction_setting = Setting.objects.get(pk=prediction_setting_pk)

    if len(substrates) == 0:
        raise ValueError("No substrates given!")

    is_pair = isinstance(substrates[0], list)

    substrate_and_names = []
    if not is_pair:
        for sub in substrates:
            substrate_and_names.append([sub, None])
    else:
        substrate_and_names = substrates

    # Check the prerequisite that we can standardize all substrates.
    standardized_substrates_and_smiles = []
    for substrate in substrate_and_names:
        try:
            stand_smiles = FormatConverter.standardize(substrate[0], remove_stereo=True)
            standardized_substrates_and_smiles.append([stand_smiles, substrate[1]])
        except ValueError:
            raise ValueError(
                f'Pathway prediction failed as standardization of SMILES "{substrate}" failed!'
            )

    pathways = []

    for pair in standardized_substrates_and_smiles:
        pw = Pathway.create(
            target_package,
            pair[0],
            name=pair[1],
            predicted=True,
        )

        # Set mode and setting.
        pw.setting = prediction_setting
        pw.kv.update({"mode": "predict"})
        pw.save()

        predict(
            pw.pk,
            prediction_setting.pk,
            limit=None,
            setting_overrides={
                "max_nodes": num_tps,
                "max_depth": num_tps,
                "model_threshold": 0.001,
            },
        )

        pathways.append(pw)

    buffer = io.StringIO()

    for idx, pw in enumerate(pathways):
        # Write the header only for the first pathway.
        buffer.write(pw.to_csv(include_header=idx == 0, include_pathway_url=True))

    buffer.seek(0)

    return buffer.getvalue()
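`batch_predict` accepts either plain SMILES strings or `[smiles, name]` pairs. A hedged invocation sketch (the pk values are placeholders):

# Hypothetical invocation of the batch_predict task defined above.
from epdb.tasks import batch_predict

# Plain SMILES list...
batch_predict.delay(["CCO", "c1ccccc1"], prediction_setting_pk=1, target_package_pk=2)

# ...or [smiles, name] pairs, where the name becomes the pathway name.
batch_predict.delay([["CCO", "Ethanol"]], prediction_setting_pk=1, target_package_pk=2)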
@ -1,17 +0,0 @@
from collections import defaultdict
from threading import Lock

_registry = defaultdict(list)
_lock = Lock()


def register_template(slot: str, template_name: str, *, order: int = 100):
    item = (order, template_name)
    with _lock:
        if item not in _registry[slot]:
            _registry[slot].append(item)
            _registry[slot].sort(key=lambda x: x[0])


def get_templates(slot: str):
    return [template_name for _, template_name in _registry.get(slot, [])]
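A short usage sketch of the slot registry this commit removes, as consumed by the `epdb_slot_templates` tag below (the slot and template names are hypothetical):

# Hypothetical plugin registering a panel template into a named slot.
from epdb.template_registry import get_templates, register_template

register_template("compound_detail_sidebar", "myplugin/panel.html", order=10)
# Assuming nothing else registered this slot:
assert get_templates("compound_detail_sidebar") == ["myplugin/panel.html"]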
@ -2,8 +2,6 @@ from django import template
from pydantic import AnyHttpUrl, ValidationError
from pydantic.type_adapter import TypeAdapter

from epdb.template_registry import get_templates

register = template.Library()

url_adapter = TypeAdapter(AnyHttpUrl)
@ -21,8 +19,3 @@ def is_url(value):
        return True
    except ValidationError:
        return False


@register.simple_tag
def epdb_slot_templates(slot):
    return get_templates(slot)
18  epdb/urls.py
@ -48,8 +48,6 @@ urlpatterns = [
    re_path(r"^user$", v.users, name="users"),
    re_path(r"^group$", v.groups, name="groups"),
    re_path(r"^search$", v.search, name="search"),
    re_path(r"^predict$", v.predict_pathway, name="predict_pathway"),
    re_path(r"^batch-predict$", v.batch_predict_pathway, name="batch_predict_pathway"),
    # User Detail
    re_path(rf"^user/(?P<user_uuid>{UUID})", v.user, name="user"),
    # Group Detail
@ -143,11 +141,6 @@ urlpatterns = [
        v.package_pathway,
        name="package pathway detail",
    ),
    re_path(
        rf"^package/(?P<package_uuid>{UUID})/predict$",
        v.package_predict_pathway,
        name="package predict pathway",
    ),
    # Pathway Nodes
    re_path(
        rf"^package/(?P<package_uuid>{UUID})/pathway/(?P<pathway_uuid>{UUID})/node$",
@ -197,16 +190,7 @@ urlpatterns = [
    re_path(r"^indigo/dearomatize$", v.dearomatize, name="indigo_dearomatize"),
    re_path(r"^indigo/layout$", v.layout, name="indigo_layout"),
    re_path(r"^depict$", v.depict, name="depict"),
    path("jobs", v.jobs, name="jobs"),
    path("jobs/<uuid:job_uuid>", v.job, name="job detail"),
    re_path(r"^jobs", v.jobs, name="jobs"),
    # OAuth Stuff
    path("o/userinfo/", v.userinfo, name="oauth_userinfo"),
    # Static Pages
    re_path(r"^terms$", v.static_terms_of_use, name="terms_of_use"),
    re_path(r"^privacy$", v.static_privacy_policy, name="privacy_policy"),
    re_path(r"^cookie-policy$", v.static_cookie_policy, name="cookie_policy"),
    re_path(r"^about$", v.static_about_us, name="about_us"),
    re_path(r"^contact$", v.static_contact_support, name="contact_support"),
    re_path(r"^careers$", v.static_careers, name="careers"),
    re_path(r"^cite$", v.static_cite, name="cite"),
]
1389  epdb/views.py
File diff suppressed because it is too large

BIN  fixtures/db.dump
Binary file not shown.

File diff suppressed because one or more lines are too long
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -1,21 +1,24 @@
import gzip
import json
import logging
import os.path
from datetime import datetime

from django.conf import settings as s
from django.http import HttpResponseNotAllowed
from django.shortcuts import render

from epdb.logic import PackageManager
from epdb.models import Rule, SimpleAmbitRule, Package, CompoundStructure
from epdb.views import get_base_context, _anonymous_or_real
from utilities.chem import FormatConverter

from rdkit import Chem
from rdkit.Chem.MolStandardize import rdMolStandardize

from epdb.models import CompoundStructure, Rule, SimpleAmbitRule
from epdb.views import get_base_context
from utilities.chem import FormatConverter

logger = logging.getLogger(__name__)

Package = s.GET_PACKAGE_MODEL()


def normalize_smiles(smiles):
    m1 = Chem.MolFromSmiles(smiles)
@ -56,7 +59,9 @@ def run_both_engines(SMILES, SMIRKS):
        set(
            [
                normalize_smiles(str(x))
                for x in FormatConverter.sanitize_smiles([str(s) for s in all_rdkit_prods])[0]
                for x in FormatConverter.sanitize_smiles(
                    [str(s) for s in all_rdkit_prods]
                )[0]
            ]
        )
    )
@ -76,9 +81,12 @@ def migration(request):
            open(s.BASE_DIR / "fixtures" / "migration_status_per_rule.json")
        )
    else:
        BBD = Package.objects.get(uuid="32de3cf4-e3e6-4168-956e-32fa5ddb0ce1")
        BBD = Package.objects.get(
            url="http://localhost:8000/package/32de3cf4-e3e6-4168-956e-32fa5ddb0ce1"
        )
        ALL_SMILES = [
            cs.smiles for cs in CompoundStructure.objects.filter(compound__package=BBD)
            cs.smiles
            for cs in CompoundStructure.objects.filter(compound__package=BBD)
        ]
        RULES = SimpleAmbitRule.objects.filter(package=BBD)

@ -134,7 +142,9 @@ def migration(request):
    )

    for r in migration_status["results"]:
        r["detail_url"] = r["detail_url"].replace("http://localhost:8000", s.SERVER_URL)
        r["detail_url"] = r["detail_url"].replace(
            "http://localhost:8000", s.SERVER_URL
        )

    context.update(**migration_status)

@ -142,10 +152,12 @@ def migration(request):


def migration_detail(request, package_uuid, rule_uuid):
    current_user = _anonymous_or_real(request)

    if request.method == "GET":
        context = get_base_context(request)

        BBD = Package.objects.get(uuid="32de3cf4-e3e6-4168-956e-32fa5ddb0ce1")
        BBD = Package.objects.get(name="EAWAG-BBD")
        STRUCTURES = CompoundStructure.objects.filter(compound__package=BBD)
        rule = Rule.objects.get(package=BBD, uuid=rule_uuid)

@ -223,7 +235,9 @@ def compare(request):
        context["smirks"] = (
            "[#1,#6:6][#7;X3;!$(NC1CC1)!$([N][C]=O)!$([!#8]CNC=O):1]([#1,#6:7])[#6;A;X4:2][H:3]>>[#1,#6:6][#7;X3:1]([#1,#6:7])[H:3].[#6;A:2]=O"
        )
        context["smiles"] = "C(CC(=O)N[C@@H](CS[Se-])C(=O)NCC(=O)[O-])[C@@H](C(=O)[O-])N"
        context["smiles"] = (
            "C(CC(=O)N[C@@H](CS[Se-])C(=O)NCC(=O)[O-])[C@@H](C(=O)[O-])N"
        )
        return render(request, "compare.html", context)

    elif request.method == "POST":
25  package.json
@ -1,25 +0,0 @@
{
  "name": "envipy",
  "version": "1.0.0",
  "private": true,
  "description": "enviPath UI - Tailwind CSS + DaisyUI",
  "scripts": {
    "dev": "tailwindcss -i static/css/input.css -o static/css/output.css --watch=always",
    "build": "tailwindcss -i static/css/input.css -o static/css/output.css --minify"
  },
  "devDependencies": {
    "@tailwindcss/cli": "^4.1.18",
    "@tailwindcss/postcss": "^4.1.18",
    "daisyui": "^5.5.14",
    "postcss": "^8.5.6",
    "prettier": "^3.7.4",
    "prettier-plugin-jinja-template": "^2.1.0",
    "prettier-plugin-tailwindcss": "^0.7.2",
    "tailwindcss": "^4.1.18"
  },
  "keywords": [
    "django",
    "tailwindcss",
    "daisyui"
  ]
}
@ -1,361 +0,0 @@
import logging
import math
import os
import pickle
from datetime import datetime
from typing import Any, List, Optional

import polars as pl
from pydantic import computed_field
from sklearn.metrics import (
    mean_absolute_error,
    mean_squared_error,
    r2_score,
    root_mean_squared_error,
)
from sklearn.model_selection import ShuffleSplit

# Once stable, these will be exposed by the enviPy-plugins lib.
from envipy_additional_information import register  # noqa: I001
from bridge.contracts import Property, PropertyType  # noqa: I001
from bridge.dto import (
    BuildResult,
    EnviPyDTO,
    EvaluationResult,
    PredictedProperty,
    RunResult,
)  # noqa: I001

from .impl.pepper import Pepper  # noqa: I001

logger = logging.getLogger(__name__)


@register("pepperprediction")
class PepperPrediction(PredictedProperty):
    mean: float | None
    std: float | None
    log_mean: float | None
    log_std: float | None

    @computed_field
    @property
    def svg(self, xscale="linear", quantiles=(0.01, 0.99), n_points=2000) -> Optional[str]:
        """
        Plot the lognormal distribution of chemical half-lives where parameters are
        given on a base-10 log scale: log10(half-life) ~ Normal(mu_log10, sigma_log10^2).

        Shades:
          - x < p (120 d) in green (Non-persistent)
          - p <= x <= vp in yellow (Persistent)
          - x > vp (180 d) in red (Very persistent)

        The legend shows the shaded color and the probability mass in each region.
        """
        import io

        import matplotlib.patches as mpatches
        import numpy as np
        from matplotlib import pyplot as plt
        from scipy import stats

        # Predictions can carry None values (see run() below); no plot then.
        if self.log_mean is None or self.log_std is None:
            return None

        sigma_log10 = self.log_std
        mu_log10 = self.log_mean

        if sigma_log10 <= 0:
            raise ValueError("sigma_log10 must be > 0")
        # Persistent and very persistent thresholds in days from REACH
        # (https://doi.org/10.26434/chemrxiv-2025-xmslf).
        p = 120
        vp = 180

        # Convert base-10 log parameters to natural-log parameters for SciPy's lognorm.
        ln10 = np.log(10.0)
        mu_ln = mu_log10 * ln10
        sigma_ln = sigma_log10 * ln10

        # SciPy parameterization: lognorm(s=sigma_ln, scale=exp(mu_ln))
        dist = stats.lognorm(s=sigma_ln, scale=np.exp(mu_ln))

        # Exact probabilities
        p_green = dist.cdf(p)  # P(X < p), probability of not persistent
        p_yellow = 1.0 - dist.cdf(p)  # P(X > p), probability of persistent
        p_red = 1.0 - dist.cdf(vp)  # P(X > vp), probability of very persistent

        # Plotting range
        q_low, q_high = dist.ppf(quantiles)
        x_min = max(1e-12, min(q_low, p) * 0.9)
        x_max = max(q_high, vp) * 1.1

        # Build x-grid (linear days axis)
        if xscale == "log":
            x = np.logspace(np.log10(x_min), np.log10(x_max), n_points)
        else:
            x = np.linspace(x_min, x_max, n_points)
        y = dist.pdf(x)

        # Masks for shading
        mask_green = x < p
        mask_yellow = (x >= p) & (x <= vp)
        mask_red = x > vp

        # Plot
        fig, ax = plt.subplots(figsize=(9, 5.5))
        ax.plot(x, y, color="#1f4e79", lw=2, label="Lognormal PDF")

        if np.any(mask_green):
            ax.fill_between(x[mask_green], y[mask_green], 0, color="tab:green", alpha=0.3)
        if np.any(mask_yellow):
            ax.fill_between(x[mask_yellow], y[mask_yellow], 0, color="gold", alpha=0.35)
        if np.any(mask_red):
            ax.fill_between(x[mask_red], y[mask_red], 0, color="tab:red", alpha=0.3)

        # Threshold lines
        ax.axvline(p, color="gray", ls="--", lw=1)
        ax.axvline(vp, color="gray", ls="--", lw=1)

        # Labels & title
        ax.set_title(
            f"Half-life Distribution (Lognormal)\nlog10 parameters: μ={mu_log10:g}, σ={sigma_log10:g}"
        )
        ax.set_xlabel("Half-life (days)")
        ax.set_ylabel("Probability density")
        ax.grid(True, alpha=0.25)

        if xscale == "log":
            ax.set_xscale("log")  # not used by the default call, but supported

        # Legend with probabilities
        patches = [
            mpatches.Patch(
                color="tab:green",
                alpha=0.3,
                label=f"Non-persistent (<{p:g} d): {p_green:.2%}",
            ),
            mpatches.Patch(
                color="gold",
                alpha=0.35,
                label=f"Persistent ({p:g}–{vp:g} d): {p_yellow:.2%}",
            ),
            mpatches.Patch(
                color="tab:red",
                alpha=0.3,
                label=f"Very persistent (>{vp:g} d): {p_red:.2%}",
            ),
        ]
        ax.legend(handles=patches, frameon=True)

        plt.tight_layout()

        # --- Export to SVG string ---
        buf = io.StringIO()
        fig.savefig(buf, format="svg", bbox_inches="tight")
        svg = buf.getvalue()
        plt.close(fig)
        buf.close()

        return svg
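The base-10 to natural-log conversion is the easy place to slip up; a small self-check of the SciPy parameterization used above (the parameter values are illustrative):

# Sanity check of the lognorm parameterization used in svg():
# if log10(X) ~ Normal(mu_log10, sigma_log10), the median of X is 10**mu_log10.
import numpy as np
from scipy import stats

mu_log10, sigma_log10 = 1.5, 0.4  # illustrative values
dist = stats.lognorm(s=sigma_log10 * np.log(10), scale=np.exp(mu_log10 * np.log(10)))
assert np.isclose(dist.median(), 10 ** mu_log10)  # ~31.6 days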
class PEPPER(Property):
    def identifier(self) -> str:
        return "pepper"

    def display(self) -> str:
        return "PEPPER"

    def name(self) -> str:
        return "Predict Environmental Pollutant PERsistence"

    def requires_rule_packages(self) -> bool:
        return False

    def requires_data_packages(self) -> bool:
        return True

    def get_type(self) -> PropertyType:
        return PropertyType.HEAVY

    def generate_dataset(self, eP: EnviPyDTO) -> pl.DataFrame:
        """
        Generates a dataset in the form of a Polars DataFrame containing compound information,
        including SMILES strings and logarithmic values of degradation half-lives (dt50).

        The dataset is built by iterating over a list of compounds, standardizing SMILES strings,
        and calculating the logarithmic mean of the half-life intervals for the environmental
        scenarios associated with each compound.

        The resulting DataFrame only includes unique rows based on SMILES and logarithmic
        half-life values.

        Parameters:
            eP (EnviPyDTO): An object that provides access to compound data and utility functions
                for standardization and retrieval of half-life information.

        Returns:
            pl.DataFrame: The resulting dataset with unique rows containing compound structure
                identifiers, standardized SMILES strings, and logarithmic half-life values.

        Raises:
            Exception: Exceptions are caught and logged during data processing, specifically when
                retrieving half-life information.

        Note:
            - The logarithmic mean is calculated from the start and end intervals of the dt50
              (half-life).
            - Compounds not associated with any half-life data are skipped, and errors encountered
              during processing are logged without halting the execution.
        """
        columns = ["structure_id", "smiles", "dt50_log"]
        rows = []

        for c in eP.get_compounds():
            hls = c.half_lifes()

            if len(hls):
                stand_smiles = eP.standardize(c.smiles, remove_stereo=True)
                for scenario, half_lives in hls.items():
                    for h in half_lives:
                        # The original PEPPER code takes the mean of the start and end interval.
                        half_mean = (h.dt50.start + h.dt50.end) / 2
                        rows.append([str(c.url), stand_smiles, math.log10(half_mean)])

        df = pl.DataFrame(data=rows, schema=columns, orient="row", infer_schema_length=None)

        df = df.unique(subset=["smiles", "dt50_log"], keep="any", maintain_order=False)

        return df

    def save_dataset(self, df: pl.DataFrame, path: str):
        with open(path, "wb") as fh:
            pickle.dump(df, fh)

    def load_dataset(self, path: str) -> pl.DataFrame:
        with open(path, "rb") as fh:
            return pickle.load(fh)

    def build(self, eP: EnviPyDTO, *args, **kwargs) -> BuildResult | None:
        logger.info(f"Start building PEPPER {eP.get_context().uuid}")
        df = self.generate_dataset(eP)

        if df.shape[0] == 0:
            raise ValueError("No data found for building model")

        p = Pepper()

        p, train_ds = p.train_model(df)

        ds_store_path = os.path.join(
            eP.get_context().work_dir, f"pepper_ds_{eP.get_context().uuid}.pkl"
        )
        self.save_dataset(train_ds, ds_store_path)

        model_store_path = os.path.join(
            eP.get_context().work_dir, f"pepper_{eP.get_context().uuid}.pkl"
        )
        p.save_model(model_store_path)
        logger.info(f"Finished building PEPPER {eP.get_context().uuid}")

    def run(self, eP: EnviPyDTO, *args, **kwargs) -> RunResult:
        load_path = os.path.join(eP.get_context().work_dir, f"pepper_{eP.get_context().uuid}.pkl")

        p = Pepper.load_model(load_path)

        X_new = [c.smiles for c in eP.get_compounds()]

        predictions = p.predict_batch(X_new)

        results = []

        # Renamed from "p" to avoid shadowing the loaded Pepper model above.
        for pred in zip(*predictions):
            if pred[0] is None or pred[1] is None:
                result = {"log_mean": None, "mean": None, "log_std": None, "std": None, "svg": None}
            else:
                result = {
                    "log_mean": pred[0],
                    "mean": 10 ** pred[0],
                    "log_std": pred[1],
                    "std": 10 ** pred[1],
                }

            results.append(PepperPrediction(**result))

        rr = RunResult(
            producer=eP.get_context().url,
            description=f"Generated at {datetime.now()}",
            result=results,
        )

        return rr

    def evaluate(self, eP: EnviPyDTO, *args, **kwargs) -> EvaluationResult | None:
        logger.info(f"Start evaluating PEPPER {eP.get_context().uuid}")
        load_path = os.path.join(eP.get_context().work_dir, f"pepper_{eP.get_context().uuid}.pkl")

        p = Pepper.load_model(load_path)

        df = self.generate_dataset(eP)
        ds = p.preprocess_data(df)

        y_pred = p.predict_batch(ds["smiles"])

        # We only need the mean.
        if isinstance(y_pred, tuple):
            y_pred = y_pred[0]

        res = self.eval_stats(ds["dt50_bayesian_mean"], y_pred)

        logger.info(f"Finished evaluating PEPPER {eP.get_context().uuid}")
        return EvaluationResult(data=res)

    def build_and_evaluate(self, eP: EnviPyDTO, *args, **kwargs) -> EvaluationResult | None:
        logger.info(f"Start evaluating PEPPER {eP.get_context().uuid}")
        ds_load_path = os.path.join(
            eP.get_context().work_dir, f"pepper_ds_{eP.get_context().uuid}.pkl"
        )
        ds = self.load_dataset(ds_load_path)

        n_splits = kwargs.get("n_splits", 20)
        shuff = ShuffleSplit(n_splits=n_splits, test_size=0.1, random_state=42)

        fold_metrics: List[dict[str, Any]] = []
        for split_id, (train_index, test_index) in enumerate(shuff.split(ds)):
            logger.info(f"Evaluation fold {split_id}/{n_splits} PEPPER {eP.get_context().uuid}")
            train = ds[train_index]
            test = ds[test_index]
            model = Pepper()
            model.train_model(train, preprocess=False)

            features = test[model.descriptors.get_descriptor_names()].rows()
            y_pred = model.predict_batch(features, is_smiles=False)

            # We only need the mean for the eval statistics, but (mean, std) can be returned.
            if isinstance(y_pred, tuple) or isinstance(y_pred, list):
                y_pred = y_pred[0]

            # Remove None predictions if they occur.
            y_true_filtered, y_pred_filtered = [], []
            for t, p in zip(test["dt50_bayesian_mean"], y_pred):
                if p is None:
                    continue
                y_true_filtered.append(t)
                y_pred_filtered.append(p)

            if len(y_true_filtered) == 0:
                logger.warning("Skipping empty fold")
                continue

            fold_metrics.append(self.eval_stats(y_true_filtered, y_pred_filtered))

        logger.info(f"Finished evaluating PEPPER {eP.get_context().uuid}")
        return EvaluationResult(data=fold_metrics)

    @staticmethod
    def eval_stats(y_true, y_pred) -> dict[str, float]:
        scores_dic = {
            "r2": r2_score(y_true, y_pred),
            "mse": mean_squared_error(y_true, y_pred),
            "rmse": root_mean_squared_error(y_true, y_pred),
            "mae": mean_absolute_error(y_true, y_pred),
        }
        return scores_dic
@ -1,196 +0,0 @@
import emcee
import numpy as np
from scipy.stats import lognorm, norm


class Bayesian:
    def __init__(self, y, comment_list=None):
        if comment_list is None:
            comment_list = []
        self.y = y
        self.comment_list = comment_list
        # LOQ default settings
        self.LOQ_lower = -1  # (2.4 hours)
        self.LOQ_upper = 3  # 1000 days
        # Prior default settings
        self.prior_mu_mean = 1.5
        self.prior_mu_std = 2
        self.prior_sigma_mean = 0.4
        self.prior_sigma_std = 0.4
        self.lower_limit_sigma = 0.2
        # EMCEE defaults
        self.nwalkers = 10
        self.iterations = 2000
        self.burn_in = 100
        ndim = 2  # number of dimensions (mean, std)
        # backend = emcee.backends.HDFBackend("backend.h5")
        # backend.reset(self.nwalkers, ndim)
        self.sampler = emcee.EnsembleSampler(self.nwalkers, ndim, self.logPosterior)
        self.posterior_mu = None
        self.posterior_sigma = None

    def get_censored_values_only(self):
        censored_values = []
        for i, comment in enumerate(self.comment_list):
            if comment in ["<", ">"]:
                censored_values.append(self.y[i])
            elif self.y[i] > self.LOQ_upper or self.y[i] < self.LOQ_lower:
                censored_values.append(self.y[i])
        return censored_values

    # Class functions
    def determine_LOQ(self):
        """
        Determines whether the LOQ is upper or lower, and its value (if not the default).
        :return: upper_LOQ, lower_LOQ
        """

        censored_values = self.get_censored_values_only()

        # Find upper LOQ
        upper_LOQ = np.nan
        # bigger than the global LOQ
        if max(self.y) >= self.LOQ_upper:
            upper_LOQ = self.LOQ_upper
        # case: exactly 365 days
        elif max(self.y) == 2.562:  # log10 of 365 days
            upper_LOQ = 2.562
            self.LOQ_upper = upper_LOQ
        # case: "bigger than" indication in the comments
        elif ">" in self.comment_list:
            i = 0
            while i < len(self.y):
                if self.y[i] == min(censored_values) and self.comment_list[i] == ">":
                    self.LOQ_upper = self.y[i]
                    break
                i += 1

        # Find lower LOQ
        lower_LOQ = np.nan
        # smaller than the global LOQ
        if min(self.y) <= self.LOQ_lower:
            lower_LOQ = self.LOQ_lower
        # case: exactly 1 day
        elif min(self.y) == 0:  # log10 of 1 day
            lower_LOQ = 0
            self.LOQ_lower = 0
        # case: "smaller than" indication in the comments
        elif "<" in self.comment_list:
            i = 0
            while i < len(self.y):
                if self.y[i] == max(censored_values) and self.comment_list[i] == "<":
                    self.LOQ_lower = self.y[i]
                    break
                i += 1
        return upper_LOQ, lower_LOQ

    def logLikelihood(self, theta, sigma):
        """
        Likelihood function (the probability of a dataset (mean, std) given the model
        parameters). Non-censored observations are treated as numeric.
        :param theta: mean half-life value to be evaluated
        :param sigma: std half-life value to be evaluated
        :return: log likelihood
        """
        upper_LOQ, lower_LOQ = self.determine_LOQ()

        n_censored_upper = 0
        n_censored_lower = 0
        y_not_cen = []

        if np.isnan(upper_LOQ) and np.isnan(lower_LOQ):
            y_not_cen = self.y
        else:
            for i in self.y:
                # A value only counts as censored if the corresponding LOQ exists
                # (is not NaN); everything else stays uncensored. The original
                # checks used np.isnan without "not", so no value was ever censored.
                if not np.isnan(upper_LOQ) and i >= upper_LOQ:  # censored above threshold
                    n_censored_upper += 1
                elif not np.isnan(lower_LOQ) and i <= lower_LOQ:  # censored below threshold
                    n_censored_lower += 1
                else:  # not censored
                    y_not_cen.append(i)

        LL_left_cen = 0
        LL_right_cen = 0
        LL_not_cen = 0

        if n_censored_lower > 0:  # log likelihood for left-censored observations
            LL_left_cen = n_censored_lower * norm.logcdf(
                lower_LOQ, loc=theta, scale=sigma
            )  # cumulative distribution function (CDF)

        if n_censored_upper > 0:  # log likelihood for right-censored observations
            LL_right_cen = n_censored_upper * norm.logsf(
                upper_LOQ, loc=theta, scale=sigma
            )  # survival function (1 - CDF)

        if len(y_not_cen) > 0:  # log likelihood for uncensored values
            LL_not_cen = sum(
                norm.logpdf(y_not_cen, loc=theta, scale=sigma)
            )  # probability density function (PDF)

        return LL_left_cen + LL_not_cen + LL_right_cen

    def get_prior_probability_sigma(self, sigma):
        # Convert mean and sd to log-space parameters; for the formula see
        # https://en.wikipedia.org/wiki/Log-normal_distribution, "Method of moments".
        temp = 1 + (self.prior_sigma_std / self.prior_sigma_mean) ** 2
        meanlog = self.prior_sigma_mean / np.sqrt(temp)
        sdlog = np.sqrt(np.log(temp))
        # Calculate the logpdf of sigma.
        norm_pdf_sigma = lognorm.logpdf(sigma, s=sdlog, loc=self.lower_limit_sigma, scale=meanlog)
        return norm_pdf_sigma

    def get_prior_probability_theta(self, theta):
        norm_pdf_theta = norm.logpdf(theta, loc=self.prior_mu_mean, scale=self.prior_mu_std)
        return norm_pdf_theta

    def logPrior(self, par):
        """
        Obtain the prior log likelihood of [theta, sigma].
        :param par: par = [theta, sigma]
        :return: log likelihood
        """
        # Calculate the mean and standard deviation in the log-space.
        norm_pdf_mean = self.get_prior_probability_theta(par[0])
        norm_pdf_std = self.get_prior_probability_sigma(par[1])
        log_norm_pdf = [norm_pdf_mean, norm_pdf_std]
        return sum(log_norm_pdf)

    def logPosterior(self, par):
        """
        Obtain the posterior log likelihood.
        :param par: [theta, sigma]
        :return: posterior log likelihood
        """
        logpri = self.logPrior(par)
        if not np.isfinite(logpri):
            return -np.inf
        loglikelihood = self.logLikelihood(par[0], par[1])
        return logpri + loglikelihood

    def get_posterior_distribution(self):
        """
        Sample the posterior distribution and take the median of the mean and std samples.
        :return: posterior half-life mean and std
        """
        if self.posterior_mu:
            return self.posterior_mu, self.posterior_sigma

        # Sampler parameters
        ndim = 2  # number of dimensions (mean, std)
        p0 = abs(np.random.randn(self.nwalkers, ndim))  # only positive starting values (for std)

        # Sample the distribution.
        self.sampler.run_mcmc(p0, self.iterations)
        # Get the chain and log_prob as a one-dimensional array (merged chains, burn-in discarded).
        samples = self.sampler.get_chain(flat=True, discard=100)
        # Take the median mean and std.
        self.posterior_mu = np.median(samples[:, 0])
        self.posterior_sigma = np.median(samples[:, 1])
        return self.posterior_mu, self.posterior_sigma


# Utility functions
def get_normal_distribution(x, mu, sig):
    return np.exp(-np.power(x - mu, 2.0) / (2 * np.power(sig, 2.0)))
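A minimal usage sketch of the censored-likelihood sampler above, assuming half-lives are already on a log10-days scale (the input values are made up and the import path is an assumption):

# Hypothetical usage: three log10(half-life) values, one right-censored (">").
from pepper.impl.bayesian import Bayesian  # import path assumed

b = Bayesian(y=[1.2, 1.6, 3.0], comment_list=["", "", ">"])
mu, sigma = b.get_posterior_distribution()  # medians of the MCMC samples
print(f"posterior log10 half-life: mu={mu:.2f}, sigma={sigma:.2f}")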
@ -1,11 +0,0 @@
GPR:
  name: Gaussian Process Regressor
  regressor: GaussianProcessRegressor
  regressor_params:
    normalize_y: True
    n_restarts_optimizer: 0
    kernel: "ConstantKernel(1.0, (1e-3, 1e3)) * Matern(length_scale=2.5, length_scale_bounds=(1e-3, 1e3), nu=0.5)"
  feature_reduction_method: None
  feature_reduction_parameters:
    pca:
      n_components: 34
@ -1,60 +0,0 @@
from abc import ABC, abstractmethod
from typing import List

from mordred import Calculator, descriptors
from padelpy import from_smiles
from rdkit import Chem


class Descriptor(ABC):
    @abstractmethod
    def get_molecule_descriptors(self, molecule: str) -> List[float | int] | None:
        pass

    @abstractmethod
    def get_descriptor_names(self) -> List[str]:
        pass


class Mordred(Descriptor):
    calc = Calculator(descriptors, ignore_3D=True)

    def get_molecule_descriptors(self, molecule: str) -> List[float | int] | None:
        mol = Chem.MolFromSmiles(molecule)
        res = list(self.calc(mol))
        return res

    def get_descriptor_names(self) -> List[str]:
        return [f"Mordred_{i}" for i in range(len(self.calc.descriptors))]


class PaDEL(Descriptor):
    calc = Calculator(descriptors)

    def get_molecule_descriptors(self, molecule: str) -> List[float | int] | None:
        try:
            padel_descriptors = from_smiles(molecule, threads=1)
        except RuntimeError:
            return []

        formatted = []
        for k, v in padel_descriptors.items():
            try:
                formatted.append(float(v))
            except ValueError:
                formatted.append(0.0)

        return formatted

    def get_descriptor_names(self) -> List[str]:
        return [f"PaDEL_{i}" for i in range(1875)]


if __name__ == "__main__":
    mol = "CC1=CC(O)=CC=C1[N+](=O)[O-]"

    m = Mordred()
    print(list(m.get_molecule_descriptors(mol)))

    p = PaDEL()
    print(list(p.get_molecule_descriptors(mol)))
@ -1,329 +0,0 @@
|
||||
import importlib.resources
|
||||
import logging
|
||||
import math
|
||||
import os
|
||||
import pickle
|
||||
from collections import defaultdict
|
||||
from typing import List
|
||||
|
||||
import numpy as np
|
||||
import polars as pl
|
||||
import yaml
|
||||
from joblib import Parallel, delayed
|
||||
from scipy.cluster import hierarchy
|
||||
from scipy.spatial.distance import squareform
|
||||
from scipy.stats import spearmanr
|
||||
from sklearn.feature_selection import VarianceThreshold
|
||||
from sklearn.gaussian_process import GaussianProcessRegressor
|
||||
from sklearn.pipeline import Pipeline
|
||||
from sklearn.preprocessing import FunctionTransformer, MinMaxScaler
|
||||
|
||||
from .bayesian import Bayesian
|
||||
from .descriptors import Mordred
|
||||
|
||||
|
||||
class Pepper:
|
||||
def __init__(self, config_path=None, random_state=42):
|
||||
self.random_state = random_state
|
||||
if config_path is None:
|
||||
config_path = importlib.resources.files("pepper.impl.config").joinpath(
|
||||
"regressor_settings_singlevalue_soil_paper_GPR_optimized.yml"
|
||||
)
|
||||
with open(config_path, "r") as file:
|
||||
regressor_settings = yaml.safe_load(file)
|
||||
if len(regressor_settings) > 1:
|
||||
logging.warning(
|
||||
f"More than one regressor config found in {config_path}, using the first one"
|
||||
)
|
||||
self.regressor_settings = regressor_settings[list(regressor_settings.keys())[0]]
|
||||
if "kernel" in self.regressor_settings["regressor_params"]:
|
||||
from sklearn.gaussian_process.kernels import ConstantKernel, Matern # noqa: F401
|
||||
|
||||
# We could hard-code the kernels they have, maybe better than using eval
|
||||
self.regressor_settings["regressor_params"]["kernel"] = eval(
|
||||
self.regressor_settings["regressor_params"]["kernel"]
|
||||
)
|
||||
# We assume the YAML has the key regressor containing a regressor name
|
||||
self.regressor = self.get_regressor_by_name(self.regressor_settings["regressor"])
|
||||
if "regressor_params" in self.regressor_settings: # Set params if any are given
|
||||
self.regressor.set_params(**self.regressor_settings["regressor_params"])
|
||||
|
||||
# TODO we could make this configurable
|
||||
self.descriptors = Mordred()
|
||||
self.descriptor_subset = None
|
||||
|
||||
self.min_max_scaler = MinMaxScaler().set_output(transform="polars")
|
||||
self.feature_preselector = Pipeline(
|
||||
[
|
||||
(
|
||||
"variance_threshold",
|
||||
VarianceThreshold(threshold=0.02).set_output(transform="polars"),
|
||||
),
|
||||
# Feature selection based on variance threshold
|
||||
(
|
||||
"custom_feature_selection",
|
||||
FunctionTransformer(
|
||||
func=self.remove_highly_correlated_features,
|
||||
validate=False,
|
||||
kw_args={"corr_method": "spearman", "cluster_threshold": 0.01},
|
||||
).set_output(transform="polars"),
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
def get_regressor_by_name(self, regressor_string):
|
||||
"""
|
||||
Load regressor function from a regressor name
|
||||
:param regressor_string: name of regressor as defined in config file (function name with parentheses)
|
||||
:return: Regressor object
|
||||
"""
|
||||
# if regressor_string == 'RandomForestRegressor':
|
||||
# return RandomForestRegressor(random_state=self.random_state)
|
||||
# elif regressor_string == 'GradientBoostingRegressor':
|
||||
# return GradientBoostingRegressor(random_state=self.random_state)
|
||||
# elif regressor_string == 'AdaBoostRegressor':
|
||||
# return AdaBoostRegressor(random_state=self.random_state)
|
||||
# elif regressor_string == 'MLPRegressor':
|
||||
# return MLPRegressor(random_state=self.random_state)
|
||||
# elif regressor_string == 'SVR':
|
||||
# return SVR()
|
||||
# elif regressor_string == 'KNeighborsRegressor':
|
||||
# return KNeighborsRegressor()
|
||||
if regressor_string == "GaussianProcessRegressor":
|
||||
return GaussianProcessRegressor(random_state=self.random_state)
|
||||
# elif regressor_string == 'DecisionTreeRegressor':
|
||||
# return DecisionTreeRegressor(random_state=self.random_state)
|
||||
# elif regressor_string == 'Ridge':
|
||||
# return Ridge(random_state=self.random_state)
|
||||
# elif regressor_string == 'SGDRegressor':
|
||||
# return SGDRegressor(random_state=self.random_state)
|
||||
# elif regressor_string == 'KernelRidge':
|
||||
# return KernelRidge()
|
||||
# elif regressor_string == 'LinearRegression':
|
||||
# return LinearRegression()
|
||||
# elif regressor_string == 'LSVR':
|
||||
# return SVR(kernel='linear') # Linear Support Vector Regressor
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
f"No regressor type defined for regressor_string = {regressor_string}"
|
||||
)
|
||||
|
||||
    def train_model(self, train_data, preprocess=True):
        """
        Fit self.regressor and the preprocessors. train_data is a pl.DataFrame.
        """
        if preprocess:
            # Compute the mean and std of half-lives per structure
            train_data = self.preprocess_data(train_data)

        # train_data structure:
        # columns = [
        #     "structure_id",
        #     "smiles",
        #     "dt50_log",
        #     "dt50_bayesian_mean",
        #     "dt50_bayesian_std",
        # ] + self.descriptors.get_descriptor_names()

        # Only select descriptor features for the feature preselector
        df = train_data[self.descriptors.get_descriptor_names()]

        # Remove columns containing any None, NaN, or inf values
        df = Pepper.keep_clean_columns(df)

        # Scale, then remove highly correlated features as well as features with low variance
        x_train_normal = self.min_max_scaler.fit_transform(df)
        x_train_normal = self.feature_preselector.fit_transform(x_train_normal)

        # Store the subset, as this is the input used for prediction
        self.descriptor_subset = x_train_normal.columns

        y_train = train_data["dt50_bayesian_mean"].to_numpy()
        y_train_std = train_data["dt50_bayesian_std"].to_numpy()

        # Use the per-structure Bayesian std as per-sample noise (alpha) for the regressor
        self.regressor.set_params(alpha=y_train_std)
        self.regressor.fit(x_train_normal, y_train)

        return self, train_data

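    # Note on the alpha trick in train_model (standard scikit-learn behaviour): passing an
    # array as GaussianProcessRegressor's alpha adds one value per training sample to the
    # kernel diagonal, so each structure's Bayesian std acts as its own observation-noise
    # term, i.e. heteroscedastic noise.
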
    @staticmethod
    def keep_clean_columns(df: pl.DataFrame) -> pl.DataFrame:
        """
        Filters out columns from the DataFrame that contain null values, NaN, or infinite values.

        This static method takes a DataFrame as input and evaluates each of its columns to determine
        if the column contains invalid values. Columns that have null values, NaN, or infinite values
        are excluded from the resulting DataFrame. The method is especially useful for cleaning up a
        dataset by keeping only the valid columns.

        Parameters:
            df (polars.DataFrame): The input DataFrame to be cleaned.

        Returns:
            polars.DataFrame: A DataFrame containing only columns without null, NaN, or infinite values.
        """
        valid_cols = []

        for col in df.columns:
            s = df[col]

            # Check nulls
            has_null = s.null_count() > 0

            # Check NaN and inf only for float columns (integer columns cannot hold NaN/inf,
            # and polars raises on is_nan() for non-float dtypes)
            if s.dtype.is_float():
                has_nan = s.is_nan().any()
                has_inf = s.is_infinite().any()
            else:
                has_nan = False
                has_inf = False

            if not (has_null or has_nan or has_inf):
                valid_cols.append(col)

        return df.select(valid_cols)

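    # Illustrative example (not from the source): given
    #   pl.DataFrame({"a": [1.0, 2.0], "b": [float("nan"), 3.0]})
    # keep_clean_columns drops "b" (it contains NaN) and returns a frame with only "a".
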
    def preprocess_data(self, dataset):
        groups = [group for group in dataset.group_by("structure_id")]

        # Unless explicitly set, compute everything serially. Cast the env var to int,
        # since os.environ values are strings.
        n_threads = int(os.environ.get("N_PEPPER_THREADS", 1))
        if n_threads > 1:
            results = Parallel(n_jobs=n_threads)(
                delayed(compute_bayes_per_group)(group[1])
                for group in dataset.group_by("structure_id")
            )
        else:
            results = []
            for g in groups:
                results.append(compute_bayes_per_group(g[1]))

        bayes_stats = pl.concat(results, how="vertical")
        dataset = dataset.join(bayes_stats, on="structure_id", how="left")

        # Remove duplicates after calculating mean, std
        dataset = dataset.unique(subset="structure_id")

        # Calculate the features and collect them in a "desc" column
        dataset = dataset.with_columns(
            pl.col("smiles")
            .map_elements(
                self.descriptors.get_molecule_descriptors, return_dtype=pl.List(pl.Float64)
            )
            .alias("desc")
        )

        # If descriptor calculation fails for a SMILES, the row is removed
        dataset = dataset.filter(pl.col("desc").is_not_null() & (pl.col("desc").list.len() > 0))

        # Flatten the features into the dataset
        dataset = dataset.with_columns(
            pl.col("desc").list.to_struct(fields=self.descriptors.get_descriptor_names())
        ).unnest("desc")

        return dataset

    def predict_batch(self, batch: List[str], is_smiles: bool = True) -> List[List[float | None]]:
        if is_smiles:
            rows = [self.descriptors.get_molecule_descriptors(smiles) for smiles in batch]
        else:
            rows = batch

        # Create a DataFrame with all descriptors
        initial_desc_rows_df = pl.DataFrame(
            data=rows, schema=self.descriptors.get_descriptor_names(), orient="row"
        )

        # Before checking for invalid values per row, select only the required columns
        initial_desc_rows_df = initial_desc_rows_df.select(
            list(self.min_max_scaler.feature_names_in_)
        )

        to_pad = []
        adjusted_rows = []
        for i, row in enumerate(initial_desc_rows_df.rows()):
            # Neither infs nor NaNs found -> row seems to be valid input
            if row and not any(math.isinf(x) for x in row) and not any(math.isnan(x) for x in row):
                adjusted_rows.append(row)
            else:
                to_pad.append(i)

        if adjusted_rows:
            desc_rows_df = pl.DataFrame(
                data=adjusted_rows, schema=list(self.min_max_scaler.feature_names_in_), orient="row"
            )
            x_normal = self.min_max_scaler.transform(desc_rows_df)
            x_normal = x_normal[self.descriptor_subset]

            res = self.regressor.predict(x_normal, return_std=True)

            # Convert to lists
            res = [list(res[0]), list(res[1])]

            # If we had rows containing bad input (inf, NaN), insert Nones at the correct positions
            if to_pad:
                for i in to_pad:
                    res[0].insert(i, None)
                    res[1].insert(i, None)

            return res

        else:
            return [[None] * len(batch), [None] * len(batch)]

    @staticmethod
    def remove_highly_correlated_features(
        X_train,
        corr_method: str = "spearman",
        cluster_threshold: float = 0.01,
        ignore=False,
    ):
        # NOTE: corr_method is currently unused; Spearman correlation is always applied.
        if ignore:
            return X_train
        else:
            # Use spearmanr from scipy to get the equivalent of pandas.corr for polars
            corr = spearmanr(X_train, axis=0).statistic

            # Ensure the correlation matrix is symmetric
            corr = (corr + corr.T) / 2
            np.fill_diagonal(corr, 1)
            corr = np.nan_to_num(corr)

            # Code from https://scikit-learn.org/stable/auto_examples/inspection/
            # plot_permutation_importance_multicollinear.html
            # We convert the correlation matrix to a distance matrix before performing
            # hierarchical clustering using Ward's linkage.
            distance_matrix = 1 - np.abs(corr)
            dist_linkage = hierarchy.ward(squareform(distance_matrix))

            cluster_ids = hierarchy.fcluster(dist_linkage, cluster_threshold, criterion="distance")
            cluster_id_to_feature_ids = defaultdict(list)

            for idx, cluster_id in enumerate(cluster_ids):
                cluster_id_to_feature_ids[cluster_id].append(idx)

            # Keep one representative feature per cluster
            my_selected_features = [v[0] for v in cluster_id_to_feature_ids.values()]
            X_train_sel = X_train[:, my_selected_features]

            return X_train_sel

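    # Illustrative example (not from the source): with two perfectly rank-correlated columns
    # and one independent column, the clustering keeps a single representative of the pair:
    #   X = pl.DataFrame({"a": [1.0, 2.0, 3.0], "b": [2.0, 4.0, 6.0], "c": [5.0, 1.0, 4.0]})
    #   Pepper.remove_highly_correlated_features(X)  # -> keeps "a" and "c", drops "b"
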
    def save_model(self, path):
        with open(path, "wb") as save_file:
            pickle.dump(self, save_file, protocol=5)

    @staticmethod
    def load_model(path) -> "Pepper":
        with open(path, "rb") as load_file:
            return pickle.load(load_file)


def compute_bayes_per_group(group):
    """Get the mean and std of the log DT50 values via a Bayesian posterior estimate"""
    mean, std = Bayesian(group["dt50_log"]).get_posterior_distribution()
    return pl.DataFrame(
        {
            "structure_id": [group["structure_id"][0]],
            "dt50_bayesian_mean": [mean],
            "dt50_bayesian_std": [std],
        }
    )

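A minimal usage sketch of the Pepper model above (not part of the diff): load_training_data()
is a hypothetical helper standing in for whatever produces a pl.DataFrame with "structure_id",
"smiles", and "dt50_log" columns (column names taken from the comments in train_model), and
the no-argument Pepper() construction is an assumption.

# Hypothetical end-to-end sketch; the data loader and constructor defaults are assumptions.
train_df = load_training_data()  # pl.DataFrame with structure_id, smiles, dt50_log
model = Pepper()
model, processed = model.train_model(train_df, preprocess=True)
model.save_model("pepper.pkl")

restored = Pepper.load_model("pepper.pkl")
means, stds = restored.predict_batch(["CCO", "c1ccccc1"])  # posterior mean and std per SMILES
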
740
pnpm-lock.yaml
generated
@ -1,740 +0,0 @@
lockfileVersion: '9.0'

settings:
  autoInstallPeers: true
  excludeLinksFromLockfile: false

importers:

  .:
    devDependencies:
      '@tailwindcss/cli':
        specifier: ^4.1.18
        version: 4.1.18
      '@tailwindcss/postcss':
        specifier: ^4.1.18
        version: 4.1.18
      daisyui:
        specifier: ^5.5.14
        version: 5.5.14
      postcss:
        specifier: ^8.5.6
        version: 8.5.6
      prettier:
        specifier: ^3.7.4
        version: 3.7.4
      prettier-plugin-jinja-template:
        specifier: ^2.1.0
        version: 2.1.0(prettier@3.7.4)
      prettier-plugin-tailwindcss:
        specifier: ^0.7.2
        version: 0.7.2(prettier@3.7.4)
      tailwindcss:
        specifier: ^4.1.18
        version: 4.1.18

packages:

  '@alloc/quick-lru@5.2.0':
    resolution: {integrity: sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==}
    engines: {node: '>=10'}

  '@jridgewell/gen-mapping@0.3.13':
    resolution: {integrity: sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==}

  '@jridgewell/remapping@2.3.5':
    resolution: {integrity: sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==}

  '@jridgewell/resolve-uri@3.1.2':
    resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==}
    engines: {node: '>=6.0.0'}

  '@jridgewell/sourcemap-codec@1.5.5':
    resolution: {integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==}

  '@jridgewell/trace-mapping@0.3.31':
    resolution: {integrity: sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==}

  '@parcel/watcher-android-arm64@2.5.1':
    resolution: {integrity: sha512-KF8+j9nNbUN8vzOFDpRMsaKBHZ/mcjEjMToVMJOhTozkDonQFFrRcfdLWn6yWKCmJKmdVxSgHiYvTCef4/qcBA==}
    engines: {node: '>= 10.0.0'}
    cpu: [arm64]
    os: [android]

  '@parcel/watcher-darwin-arm64@2.5.1':
    resolution: {integrity: sha512-eAzPv5osDmZyBhou8PoF4i6RQXAfeKL9tjb3QzYuccXFMQU0ruIc/POh30ePnaOyD1UXdlKguHBmsTs53tVoPw==}
    engines: {node: '>= 10.0.0'}
    cpu: [arm64]
    os: [darwin]

  '@parcel/watcher-darwin-x64@2.5.1':
    resolution: {integrity: sha512-1ZXDthrnNmwv10A0/3AJNZ9JGlzrF82i3gNQcWOzd7nJ8aj+ILyW1MTxVk35Db0u91oD5Nlk9MBiujMlwmeXZg==}
    engines: {node: '>= 10.0.0'}
    cpu: [x64]
    os: [darwin]

  '@parcel/watcher-freebsd-x64@2.5.1':
    resolution: {integrity: sha512-SI4eljM7Flp9yPuKi8W0ird8TI/JK6CSxju3NojVI6BjHsTyK7zxA9urjVjEKJ5MBYC+bLmMcbAWlZ+rFkLpJQ==}
    engines: {node: '>= 10.0.0'}
    cpu: [x64]
    os: [freebsd]

  '@parcel/watcher-linux-arm-glibc@2.5.1':
    resolution: {integrity: sha512-RCdZlEyTs8geyBkkcnPWvtXLY44BCeZKmGYRtSgtwwnHR4dxfHRG3gR99XdMEdQ7KeiDdasJwwvNSF5jKtDwdA==}
    engines: {node: '>= 10.0.0'}
    cpu: [arm]
    os: [linux]

  '@parcel/watcher-linux-arm-musl@2.5.1':
    resolution: {integrity: sha512-6E+m/Mm1t1yhB8X412stiKFG3XykmgdIOqhjWj+VL8oHkKABfu/gjFj8DvLrYVHSBNC+/u5PeNrujiSQ1zwd1Q==}
    engines: {node: '>= 10.0.0'}
    cpu: [arm]
    os: [linux]

  '@parcel/watcher-linux-arm64-glibc@2.5.1':
    resolution: {integrity: sha512-LrGp+f02yU3BN9A+DGuY3v3bmnFUggAITBGriZHUREfNEzZh/GO06FF5u2kx8x+GBEUYfyTGamol4j3m9ANe8w==}
    engines: {node: '>= 10.0.0'}
    cpu: [arm64]
    os: [linux]

  '@parcel/watcher-linux-arm64-musl@2.5.1':
    resolution: {integrity: sha512-cFOjABi92pMYRXS7AcQv9/M1YuKRw8SZniCDw0ssQb/noPkRzA+HBDkwmyOJYp5wXcsTrhxO0zq1U11cK9jsFg==}
    engines: {node: '>= 10.0.0'}
    cpu: [arm64]
    os: [linux]

  '@parcel/watcher-linux-x64-glibc@2.5.1':
    resolution: {integrity: sha512-GcESn8NZySmfwlTsIur+49yDqSny2IhPeZfXunQi48DMugKeZ7uy1FX83pO0X22sHntJ4Ub+9k34XQCX+oHt2A==}
    engines: {node: '>= 10.0.0'}
    cpu: [x64]
    os: [linux]

  '@parcel/watcher-linux-x64-musl@2.5.1':
    resolution: {integrity: sha512-n0E2EQbatQ3bXhcH2D1XIAANAcTZkQICBPVaxMeaCVBtOpBZpWJuf7LwyWPSBDITb7In8mqQgJ7gH8CILCURXg==}
    engines: {node: '>= 10.0.0'}
    cpu: [x64]
    os: [linux]

  '@parcel/watcher-win32-arm64@2.5.1':
    resolution: {integrity: sha512-RFzklRvmc3PkjKjry3hLF9wD7ppR4AKcWNzH7kXR7GUe0Igb3Nz8fyPwtZCSquGrhU5HhUNDr/mKBqj7tqA2Vw==}
    engines: {node: '>= 10.0.0'}
    cpu: [arm64]
    os: [win32]

  '@parcel/watcher-win32-ia32@2.5.1':
    resolution: {integrity: sha512-c2KkcVN+NJmuA7CGlaGD1qJh1cLfDnQsHjE89E60vUEMlqduHGCdCLJCID5geFVM0dOtA3ZiIO8BoEQmzQVfpQ==}
    engines: {node: '>= 10.0.0'}
    cpu: [ia32]
    os: [win32]

  '@parcel/watcher-win32-x64@2.5.1':
    resolution: {integrity: sha512-9lHBdJITeNR++EvSQVUcaZoWupyHfXe1jZvGZ06O/5MflPcuPLtEphScIBL+AiCWBO46tDSHzWyD0uDmmZqsgA==}
    engines: {node: '>= 10.0.0'}
    cpu: [x64]
    os: [win32]

  '@parcel/watcher@2.5.1':
    resolution: {integrity: sha512-dfUnCxiN9H4ap84DvD2ubjw+3vUNpstxa0TneY/Paat8a3R4uQZDLSvWjmznAY/DoahqTHl9V46HF/Zs3F29pg==}
    engines: {node: '>= 10.0.0'}

  '@tailwindcss/cli@4.1.18':
    resolution: {integrity: sha512-sMZ+lZbDyxwjD2E0L7oRUjJ01Ffjtme5OtjvvnC+cV4CEDcbqzbp25TCpxHj6kWLU9+DlqJOiNgSOgctC2aZmg==}
    hasBin: true

  '@tailwindcss/node@4.1.18':
    resolution: {integrity: sha512-DoR7U1P7iYhw16qJ49fgXUlry1t4CpXeErJHnQ44JgTSKMaZUdf17cfn5mHchfJ4KRBZRFA/Coo+MUF5+gOaCQ==}

  '@tailwindcss/oxide-android-arm64@4.1.18':
    resolution: {integrity: sha512-dJHz7+Ugr9U/diKJA0W6N/6/cjI+ZTAoxPf9Iz9BFRF2GzEX8IvXxFIi/dZBloVJX/MZGvRuFA9rqwdiIEZQ0Q==}
    engines: {node: '>= 10'}
    cpu: [arm64]
    os: [android]

  '@tailwindcss/oxide-darwin-arm64@4.1.18':
    resolution: {integrity: sha512-Gc2q4Qhs660bhjyBSKgq6BYvwDz4G+BuyJ5H1xfhmDR3D8HnHCmT/BSkvSL0vQLy/nkMLY20PQ2OoYMO15Jd0A==}
    engines: {node: '>= 10'}
    cpu: [arm64]
    os: [darwin]

  '@tailwindcss/oxide-darwin-x64@4.1.18':
    resolution: {integrity: sha512-FL5oxr2xQsFrc3X9o1fjHKBYBMD1QZNyc1Xzw/h5Qu4XnEBi3dZn96HcHm41c/euGV+GRiXFfh2hUCyKi/e+yw==}
    engines: {node: '>= 10'}
    cpu: [x64]
    os: [darwin]

  '@tailwindcss/oxide-freebsd-x64@4.1.18':
    resolution: {integrity: sha512-Fj+RHgu5bDodmV1dM9yAxlfJwkkWvLiRjbhuO2LEtwtlYlBgiAT4x/j5wQr1tC3SANAgD+0YcmWVrj8R9trVMA==}
    engines: {node: '>= 10'}
    cpu: [x64]
    os: [freebsd]

  '@tailwindcss/oxide-linux-arm-gnueabihf@4.1.18':
    resolution: {integrity: sha512-Fp+Wzk/Ws4dZn+LV2Nqx3IilnhH51YZoRaYHQsVq3RQvEl+71VGKFpkfHrLM/Li+kt5c0DJe/bHXK1eHgDmdiA==}
    engines: {node: '>= 10'}
    cpu: [arm]
    os: [linux]

  '@tailwindcss/oxide-linux-arm64-gnu@4.1.18':
    resolution: {integrity: sha512-S0n3jboLysNbh55Vrt7pk9wgpyTTPD0fdQeh7wQfMqLPM/Hrxi+dVsLsPrycQjGKEQk85Kgbx+6+QnYNiHalnw==}
    engines: {node: '>= 10'}
    cpu: [arm64]
    os: [linux]

  '@tailwindcss/oxide-linux-arm64-musl@4.1.18':
    resolution: {integrity: sha512-1px92582HkPQlaaCkdRcio71p8bc8i/ap5807tPRDK/uw953cauQBT8c5tVGkOwrHMfc2Yh6UuxaH4vtTjGvHg==}
    engines: {node: '>= 10'}
    cpu: [arm64]
    os: [linux]

  '@tailwindcss/oxide-linux-x64-gnu@4.1.18':
    resolution: {integrity: sha512-v3gyT0ivkfBLoZGF9LyHmts0Isc8jHZyVcbzio6Wpzifg/+5ZJpDiRiUhDLkcr7f/r38SWNe7ucxmGW3j3Kb/g==}
    engines: {node: '>= 10'}
    cpu: [x64]
    os: [linux]

  '@tailwindcss/oxide-linux-x64-musl@4.1.18':
    resolution: {integrity: sha512-bhJ2y2OQNlcRwwgOAGMY0xTFStt4/wyU6pvI6LSuZpRgKQwxTec0/3Scu91O8ir7qCR3AuepQKLU/kX99FouqQ==}
    engines: {node: '>= 10'}
    cpu: [x64]
    os: [linux]

  '@tailwindcss/oxide-wasm32-wasi@4.1.18':
    resolution: {integrity: sha512-LffYTvPjODiP6PT16oNeUQJzNVyJl1cjIebq/rWWBF+3eDst5JGEFSc5cWxyRCJ0Mxl+KyIkqRxk1XPEs9x8TA==}
    engines: {node: '>=14.0.0'}
    cpu: [wasm32]
    bundledDependencies:
      - '@napi-rs/wasm-runtime'
      - '@emnapi/core'
      - '@emnapi/runtime'
      - '@tybys/wasm-util'
      - '@emnapi/wasi-threads'
      - tslib

  '@tailwindcss/oxide-win32-arm64-msvc@4.1.18':
    resolution: {integrity: sha512-HjSA7mr9HmC8fu6bdsZvZ+dhjyGCLdotjVOgLA2vEqxEBZaQo9YTX4kwgEvPCpRh8o4uWc4J/wEoFzhEmjvPbA==}
    engines: {node: '>= 10'}
    cpu: [arm64]
    os: [win32]

  '@tailwindcss/oxide-win32-x64-msvc@4.1.18':
    resolution: {integrity: sha512-bJWbyYpUlqamC8dpR7pfjA0I7vdF6t5VpUGMWRkXVE3AXgIZjYUYAK7II1GNaxR8J1SSrSrppRar8G++JekE3Q==}
    engines: {node: '>= 10'}
    cpu: [x64]
    os: [win32]

  '@tailwindcss/oxide@4.1.18':
    resolution: {integrity: sha512-EgCR5tTS5bUSKQgzeMClT6iCY3ToqE1y+ZB0AKldj809QXk1Y+3jB0upOYZrn9aGIzPtUsP7sX4QQ4XtjBB95A==}
    engines: {node: '>= 10'}

  '@tailwindcss/postcss@4.1.18':
    resolution: {integrity: sha512-Ce0GFnzAOuPyfV5SxjXGn0CubwGcuDB0zcdaPuCSzAa/2vII24JTkH+I6jcbXLb1ctjZMZZI6OjDaLPJQL1S0g==}

  braces@3.0.3:
    resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==}
    engines: {node: '>=8'}

  daisyui@5.5.14:
    resolution: {integrity: sha512-L47rvw7I7hK68TA97VB8Ee0woHew+/ohR6Lx6Ah/krfISOqcG4My7poNpX5Mo5/ytMxiR40fEaz6njzDi7cuSg==}

  detect-libc@1.0.3:
    resolution: {integrity: sha512-pGjwhsmsp4kL2RTz08wcOlGN83otlqHeD/Z5T8GXZB+/YcpQ/dgo+lbU8ZsGxV0HIvqqxo9l7mqYwyYMD9bKDg==}
    engines: {node: '>=0.10'}
    hasBin: true

  detect-libc@2.1.2:
    resolution: {integrity: sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==}
    engines: {node: '>=8'}

  enhanced-resolve@5.18.4:
    resolution: {integrity: sha512-LgQMM4WXU3QI+SYgEc2liRgznaD5ojbmY3sb8LxyguVkIg5FxdpTkvk72te2R38/TGKxH634oLxXRGY6d7AP+Q==}
    engines: {node: '>=10.13.0'}

  fill-range@7.1.1:
    resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==}
    engines: {node: '>=8'}

  graceful-fs@4.2.11:
    resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==}

  is-extglob@2.1.1:
    resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==}
    engines: {node: '>=0.10.0'}

  is-glob@4.0.3:
    resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==}
    engines: {node: '>=0.10.0'}

  is-number@7.0.0:
    resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==}
    engines: {node: '>=0.12.0'}

  jiti@2.6.1:
    resolution: {integrity: sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==}
    hasBin: true

  lightningcss-android-arm64@1.30.2:
    resolution: {integrity: sha512-BH9sEdOCahSgmkVhBLeU7Hc9DWeZ1Eb6wNS6Da8igvUwAe0sqROHddIlvU06q3WyXVEOYDZ6ykBZQnjTbmo4+A==}
    engines: {node: '>= 12.0.0'}
    cpu: [arm64]
    os: [android]

  lightningcss-darwin-arm64@1.30.2:
    resolution: {integrity: sha512-ylTcDJBN3Hp21TdhRT5zBOIi73P6/W0qwvlFEk22fkdXchtNTOU4Qc37SkzV+EKYxLouZ6M4LG9NfZ1qkhhBWA==}
    engines: {node: '>= 12.0.0'}
    cpu: [arm64]
    os: [darwin]

  lightningcss-darwin-x64@1.30.2:
    resolution: {integrity: sha512-oBZgKchomuDYxr7ilwLcyms6BCyLn0z8J0+ZZmfpjwg9fRVZIR5/GMXd7r9RH94iDhld3UmSjBM6nXWM2TfZTQ==}
    engines: {node: '>= 12.0.0'}
    cpu: [x64]
    os: [darwin]

  lightningcss-freebsd-x64@1.30.2:
    resolution: {integrity: sha512-c2bH6xTrf4BDpK8MoGG4Bd6zAMZDAXS569UxCAGcA7IKbHNMlhGQ89eRmvpIUGfKWNVdbhSbkQaWhEoMGmGslA==}
    engines: {node: '>= 12.0.0'}
    cpu: [x64]
    os: [freebsd]

  lightningcss-linux-arm-gnueabihf@1.30.2:
    resolution: {integrity: sha512-eVdpxh4wYcm0PofJIZVuYuLiqBIakQ9uFZmipf6LF/HRj5Bgm0eb3qL/mr1smyXIS1twwOxNWndd8z0E374hiA==}
    engines: {node: '>= 12.0.0'}
    cpu: [arm]
    os: [linux]

  lightningcss-linux-arm64-gnu@1.30.2:
    resolution: {integrity: sha512-UK65WJAbwIJbiBFXpxrbTNArtfuznvxAJw4Q2ZGlU8kPeDIWEX1dg3rn2veBVUylA2Ezg89ktszWbaQnxD/e3A==}
    engines: {node: '>= 12.0.0'}
    cpu: [arm64]
    os: [linux]

  lightningcss-linux-arm64-musl@1.30.2:
    resolution: {integrity: sha512-5Vh9dGeblpTxWHpOx8iauV02popZDsCYMPIgiuw97OJ5uaDsL86cnqSFs5LZkG3ghHoX5isLgWzMs+eD1YzrnA==}
    engines: {node: '>= 12.0.0'}
    cpu: [arm64]
    os: [linux]

  lightningcss-linux-x64-gnu@1.30.2:
    resolution: {integrity: sha512-Cfd46gdmj1vQ+lR6VRTTadNHu6ALuw2pKR9lYq4FnhvgBc4zWY1EtZcAc6EffShbb1MFrIPfLDXD6Xprbnni4w==}
    engines: {node: '>= 12.0.0'}
    cpu: [x64]
    os: [linux]

  lightningcss-linux-x64-musl@1.30.2:
    resolution: {integrity: sha512-XJaLUUFXb6/QG2lGIW6aIk6jKdtjtcffUT0NKvIqhSBY3hh9Ch+1LCeH80dR9q9LBjG3ewbDjnumefsLsP6aiA==}
    engines: {node: '>= 12.0.0'}
    cpu: [x64]
    os: [linux]

  lightningcss-win32-arm64-msvc@1.30.2:
    resolution: {integrity: sha512-FZn+vaj7zLv//D/192WFFVA0RgHawIcHqLX9xuWiQt7P0PtdFEVaxgF9rjM/IRYHQXNnk61/H/gb2Ei+kUQ4xQ==}
    engines: {node: '>= 12.0.0'}
    cpu: [arm64]
    os: [win32]

  lightningcss-win32-x64-msvc@1.30.2:
    resolution: {integrity: sha512-5g1yc73p+iAkid5phb4oVFMB45417DkRevRbt/El/gKXJk4jid+vPFF/AXbxn05Aky8PapwzZrdJShv5C0avjw==}
    engines: {node: '>= 12.0.0'}
    cpu: [x64]
    os: [win32]

  lightningcss@1.30.2:
    resolution: {integrity: sha512-utfs7Pr5uJyyvDETitgsaqSyjCb2qNRAtuqUeWIAKztsOYdcACf2KtARYXg2pSvhkt+9NfoaNY7fxjl6nuMjIQ==}
    engines: {node: '>= 12.0.0'}

  magic-string@0.30.21:
    resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==}

  micromatch@4.0.8:
    resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==}
    engines: {node: '>=8.6'}

  mri@1.2.0:
    resolution: {integrity: sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==}
    engines: {node: '>=4'}

  nanoid@3.3.11:
    resolution: {integrity: sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==}
    engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1}
    hasBin: true

  node-addon-api@7.1.1:
    resolution: {integrity: sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==}

  picocolors@1.1.1:
    resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==}

  picomatch@2.3.1:
    resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==}
    engines: {node: '>=8.6'}

  postcss@8.5.6:
    resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==}
    engines: {node: ^10 || ^12 || >=14}

  prettier-plugin-jinja-template@2.1.0:
    resolution: {integrity: sha512-mzoCp2Oy9BDSug80fw3B3J4n4KQj1hRvoQOL1akqcDKBb5nvYxrik9zUEDs4AEJ6nK7QDTGoH0y9rx7AlnQ78Q==}
    peerDependencies:
      prettier: ^3.0.0

  prettier-plugin-tailwindcss@0.7.2:
    resolution: {integrity: sha512-LkphyK3Fw+q2HdMOoiEHWf93fNtYJwfamoKPl7UwtjFQdei/iIBoX11G6j706FzN3ymX9mPVi97qIY8328vdnA==}
    engines: {node: '>=20.19'}
    peerDependencies:
      '@ianvs/prettier-plugin-sort-imports': '*'
      '@prettier/plugin-hermes': '*'
      '@prettier/plugin-oxc': '*'
      '@prettier/plugin-pug': '*'
      '@shopify/prettier-plugin-liquid': '*'
      '@trivago/prettier-plugin-sort-imports': '*'
      '@zackad/prettier-plugin-twig': '*'
      prettier: ^3.0
      prettier-plugin-astro: '*'
      prettier-plugin-css-order: '*'
      prettier-plugin-jsdoc: '*'
      prettier-plugin-marko: '*'
      prettier-plugin-multiline-arrays: '*'
      prettier-plugin-organize-attributes: '*'
      prettier-plugin-organize-imports: '*'
      prettier-plugin-sort-imports: '*'
      prettier-plugin-svelte: '*'
    peerDependenciesMeta:
      '@ianvs/prettier-plugin-sort-imports':
        optional: true
      '@prettier/plugin-hermes':
        optional: true
      '@prettier/plugin-oxc':
        optional: true
      '@prettier/plugin-pug':
        optional: true
      '@shopify/prettier-plugin-liquid':
        optional: true
      '@trivago/prettier-plugin-sort-imports':
        optional: true
      '@zackad/prettier-plugin-twig':
        optional: true
      prettier-plugin-astro:
        optional: true
      prettier-plugin-css-order:
        optional: true
      prettier-plugin-jsdoc:
        optional: true
      prettier-plugin-marko:
        optional: true
      prettier-plugin-multiline-arrays:
        optional: true
      prettier-plugin-organize-attributes:
        optional: true
      prettier-plugin-organize-imports:
        optional: true
      prettier-plugin-sort-imports:
        optional: true
      prettier-plugin-svelte:
        optional: true

  prettier@3.7.4:
    resolution: {integrity: sha512-v6UNi1+3hSlVvv8fSaoUbggEM5VErKmmpGA7Pl3HF8V6uKY7rvClBOJlH6yNwQtfTueNkGVpOv/mtWL9L4bgRA==}
    engines: {node: '>=14'}
    hasBin: true

  source-map-js@1.2.1:
    resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==}
    engines: {node: '>=0.10.0'}

  tailwindcss@4.1.18:
    resolution: {integrity: sha512-4+Z+0yiYyEtUVCScyfHCxOYP06L5Ne+JiHhY2IjR2KWMIWhJOYZKLSGZaP5HkZ8+bY0cxfzwDE5uOmzFXyIwxw==}

  tapable@2.3.0:
    resolution: {integrity: sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==}
    engines: {node: '>=6'}

  to-regex-range@5.0.1:
    resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==}
    engines: {node: '>=8.0'}

snapshots:

  '@alloc/quick-lru@5.2.0': {}

  '@jridgewell/gen-mapping@0.3.13':
    dependencies:
      '@jridgewell/sourcemap-codec': 1.5.5
      '@jridgewell/trace-mapping': 0.3.31

  '@jridgewell/remapping@2.3.5':
    dependencies:
      '@jridgewell/gen-mapping': 0.3.13
      '@jridgewell/trace-mapping': 0.3.31

  '@jridgewell/resolve-uri@3.1.2': {}

  '@jridgewell/sourcemap-codec@1.5.5': {}

  '@jridgewell/trace-mapping@0.3.31':
    dependencies:
      '@jridgewell/resolve-uri': 3.1.2
      '@jridgewell/sourcemap-codec': 1.5.5

  '@parcel/watcher-android-arm64@2.5.1':
    optional: true

  '@parcel/watcher-darwin-arm64@2.5.1':
    optional: true

  '@parcel/watcher-darwin-x64@2.5.1':
    optional: true

  '@parcel/watcher-freebsd-x64@2.5.1':
    optional: true

  '@parcel/watcher-linux-arm-glibc@2.5.1':
    optional: true

  '@parcel/watcher-linux-arm-musl@2.5.1':
    optional: true

  '@parcel/watcher-linux-arm64-glibc@2.5.1':
    optional: true

  '@parcel/watcher-linux-arm64-musl@2.5.1':
    optional: true

  '@parcel/watcher-linux-x64-glibc@2.5.1':
    optional: true

  '@parcel/watcher-linux-x64-musl@2.5.1':
    optional: true

  '@parcel/watcher-win32-arm64@2.5.1':
    optional: true

  '@parcel/watcher-win32-ia32@2.5.1':
    optional: true

  '@parcel/watcher-win32-x64@2.5.1':
    optional: true

  '@parcel/watcher@2.5.1':
    dependencies:
      detect-libc: 1.0.3
      is-glob: 4.0.3
      micromatch: 4.0.8
      node-addon-api: 7.1.1
    optionalDependencies:
      '@parcel/watcher-android-arm64': 2.5.1
      '@parcel/watcher-darwin-arm64': 2.5.1
      '@parcel/watcher-darwin-x64': 2.5.1
      '@parcel/watcher-freebsd-x64': 2.5.1
      '@parcel/watcher-linux-arm-glibc': 2.5.1
      '@parcel/watcher-linux-arm-musl': 2.5.1
      '@parcel/watcher-linux-arm64-glibc': 2.5.1
      '@parcel/watcher-linux-arm64-musl': 2.5.1
      '@parcel/watcher-linux-x64-glibc': 2.5.1
      '@parcel/watcher-linux-x64-musl': 2.5.1
      '@parcel/watcher-win32-arm64': 2.5.1
      '@parcel/watcher-win32-ia32': 2.5.1
      '@parcel/watcher-win32-x64': 2.5.1

  '@tailwindcss/cli@4.1.18':
    dependencies:
      '@parcel/watcher': 2.5.1
      '@tailwindcss/node': 4.1.18
      '@tailwindcss/oxide': 4.1.18
      enhanced-resolve: 5.18.4
      mri: 1.2.0
      picocolors: 1.1.1
      tailwindcss: 4.1.18

  '@tailwindcss/node@4.1.18':
    dependencies:
      '@jridgewell/remapping': 2.3.5
      enhanced-resolve: 5.18.4
      jiti: 2.6.1
      lightningcss: 1.30.2
      magic-string: 0.30.21
      source-map-js: 1.2.1
      tailwindcss: 4.1.18

  '@tailwindcss/oxide-android-arm64@4.1.18':
    optional: true

  '@tailwindcss/oxide-darwin-arm64@4.1.18':
    optional: true

  '@tailwindcss/oxide-darwin-x64@4.1.18':
    optional: true

  '@tailwindcss/oxide-freebsd-x64@4.1.18':
    optional: true

  '@tailwindcss/oxide-linux-arm-gnueabihf@4.1.18':
    optional: true

  '@tailwindcss/oxide-linux-arm64-gnu@4.1.18':
    optional: true

  '@tailwindcss/oxide-linux-arm64-musl@4.1.18':
    optional: true

  '@tailwindcss/oxide-linux-x64-gnu@4.1.18':
    optional: true

  '@tailwindcss/oxide-linux-x64-musl@4.1.18':
    optional: true

  '@tailwindcss/oxide-wasm32-wasi@4.1.18':
    optional: true

  '@tailwindcss/oxide-win32-arm64-msvc@4.1.18':
    optional: true

  '@tailwindcss/oxide-win32-x64-msvc@4.1.18':
    optional: true

  '@tailwindcss/oxide@4.1.18':
    optionalDependencies:
      '@tailwindcss/oxide-android-arm64': 4.1.18
      '@tailwindcss/oxide-darwin-arm64': 4.1.18
      '@tailwindcss/oxide-darwin-x64': 4.1.18
      '@tailwindcss/oxide-freebsd-x64': 4.1.18
      '@tailwindcss/oxide-linux-arm-gnueabihf': 4.1.18
      '@tailwindcss/oxide-linux-arm64-gnu': 4.1.18
      '@tailwindcss/oxide-linux-arm64-musl': 4.1.18
      '@tailwindcss/oxide-linux-x64-gnu': 4.1.18
      '@tailwindcss/oxide-linux-x64-musl': 4.1.18
      '@tailwindcss/oxide-wasm32-wasi': 4.1.18
      '@tailwindcss/oxide-win32-arm64-msvc': 4.1.18
      '@tailwindcss/oxide-win32-x64-msvc': 4.1.18

  '@tailwindcss/postcss@4.1.18':
    dependencies:
      '@alloc/quick-lru': 5.2.0
      '@tailwindcss/node': 4.1.18
      '@tailwindcss/oxide': 4.1.18
      postcss: 8.5.6
      tailwindcss: 4.1.18

  braces@3.0.3:
    dependencies:
      fill-range: 7.1.1

  daisyui@5.5.14: {}

  detect-libc@1.0.3: {}

  detect-libc@2.1.2: {}

  enhanced-resolve@5.18.4:
    dependencies:
      graceful-fs: 4.2.11
      tapable: 2.3.0

  fill-range@7.1.1:
    dependencies:
      to-regex-range: 5.0.1

  graceful-fs@4.2.11: {}

  is-extglob@2.1.1: {}

  is-glob@4.0.3:
    dependencies:
      is-extglob: 2.1.1

  is-number@7.0.0: {}

  jiti@2.6.1: {}

  lightningcss-android-arm64@1.30.2:
    optional: true

  lightningcss-darwin-arm64@1.30.2:
    optional: true

  lightningcss-darwin-x64@1.30.2:
    optional: true

  lightningcss-freebsd-x64@1.30.2:
    optional: true

  lightningcss-linux-arm-gnueabihf@1.30.2:
    optional: true

  lightningcss-linux-arm64-gnu@1.30.2:
    optional: true

  lightningcss-linux-arm64-musl@1.30.2:
    optional: true

  lightningcss-linux-x64-gnu@1.30.2:
    optional: true

  lightningcss-linux-x64-musl@1.30.2:
    optional: true

  lightningcss-win32-arm64-msvc@1.30.2:
    optional: true

  lightningcss-win32-x64-msvc@1.30.2:
    optional: true

  lightningcss@1.30.2:
    dependencies:
      detect-libc: 2.1.2
    optionalDependencies:
      lightningcss-android-arm64: 1.30.2
      lightningcss-darwin-arm64: 1.30.2
      lightningcss-darwin-x64: 1.30.2
      lightningcss-freebsd-x64: 1.30.2
      lightningcss-linux-arm-gnueabihf: 1.30.2
      lightningcss-linux-arm64-gnu: 1.30.2
      lightningcss-linux-arm64-musl: 1.30.2
      lightningcss-linux-x64-gnu: 1.30.2
      lightningcss-linux-x64-musl: 1.30.2
      lightningcss-win32-arm64-msvc: 1.30.2
      lightningcss-win32-x64-msvc: 1.30.2

  magic-string@0.30.21:
    dependencies:
      '@jridgewell/sourcemap-codec': 1.5.5

  micromatch@4.0.8:
    dependencies:
      braces: 3.0.3
      picomatch: 2.3.1

  mri@1.2.0: {}

  nanoid@3.3.11: {}

  node-addon-api@7.1.1: {}

  picocolors@1.1.1: {}

  picomatch@2.3.1: {}

  postcss@8.5.6:
    dependencies:
      nanoid: 3.3.11
      picocolors: 1.1.1
      source-map-js: 1.2.1

  prettier-plugin-jinja-template@2.1.0(prettier@3.7.4):
    dependencies:
      prettier: 3.7.4

  prettier-plugin-tailwindcss@0.7.2(prettier@3.7.4):
    dependencies:
      prettier: 3.7.4

  prettier@3.7.4: {}

  source-map-js@1.2.1: {}

  tailwindcss@4.1.18: {}

  tapable@2.3.0: {}

  to-regex-range@5.0.1:
    dependencies:
      is-number: 7.0.0
@ -1,3 +0,0 @@
onlyBuiltDependencies:
  - '@parcel/watcher'
  - '@tailwindcss/oxide'
@ -9,8 +9,7 @@ dependencies = [
    "django>=5.2.1",
    "django-extensions>=4.1",
    "django-model-utils>=5.0.0",
    "django-ninja>=1.4.5",
    "django-ninja-extra>=0.30.6",
    "django-ninja>=1.4.1",
    "django-oauth-toolkit>=3.0.1",
    "django-polymorphic>=4.1.0",
    "enviformer",
@ -19,7 +18,6 @@ dependencies = [
    "envipy-plugins",
    "epam-indigo>=1.30.1",
    "gunicorn>=23.0.0",
    "jsonref>=1.1.0",
    "networkx>=3.4.2",
    "psycopg2-binary>=2.9.10",
    "python-dotenv>=1.1.0",
@ -29,14 +27,13 @@ dependencies = [
    "scikit-learn>=1.6.1",
    "sentry-sdk[django]>=2.32.0",
    "setuptools>=80.8.0",
    "nh3==0.3.2",
    "polars==1.35.1",
]

[tool.uv.sources]
enviformer = { git = "ssh://git@git.envipath.com/enviPath/enviformer.git", rev = "v0.1.4" }
envipy-plugins = { git = "ssh://git@git.envipath.com/enviPath/enviPy-plugins.git", rev = "v0.1.0" }
envipy-additional-information = { git = "ssh://git@git.envipath.com/enviPath/enviPy-additional-information.git", branch = "develop" }
envipy-additional-information = { git = "ssh://git@git.envipath.com/enviPath/enviPy-additional-information.git", rev = "v0.1.7" }
envipy-ambit = { git = "ssh://git@git.envipath.com/enviPath/enviPy-ambit.git" }

[project.optional-dependencies]
@ -47,17 +44,8 @@ dev = [
    "poethepoet>=0.37.0",
    "pre-commit>=4.3.0",
    "ruff>=0.13.3",
    "pytest-playwright>=0.7.1",
    "pytest-django>=4.11.1",
    "pytest-cov>=7.0.0",
]
pepper-plugin = [
    "matplotlib>=3.10.8",
    "pyyaml>=6.0.3",
    "emcee>=3.1.6",
    "mordredcommunity==2.0.7",
    "padelpy" # Remove once we're certain we'll go with mordred
]

[tool.ruff]
line-length = 100

@ -77,82 +65,28 @@ docstring-code-format = true

[tool.poe.tasks]
# Main tasks
setup = { sequence = [
    "db-up",
    "migrate",
    "bootstrap",
], help = "Complete setup: start database, run migrations, and bootstrap data" }
dev = { cmd = "uv run python scripts/dev_server.py", help = "Start the development server with CSS watcher", deps = [
    "db-up",
    "js-deps",
] }
build = { sequence = [
    "build-frontend",
    "collectstatic",
], help = "Build frontend assets and collect static files" }
setup = { sequence = ["db-up", "migrate", "bootstrap"], help = "Complete setup: start database, run migrations, and bootstrap data" }
dev = { cmd = "python manage.py runserver", help = "Start the development server", deps = ["db-up"] }

# Database tasks
db-up = { cmd = "docker compose -p envipath -f docker-compose.dev.yml up -d", help = "Start PostgreSQL database using Docker Compose" }
db-down = { cmd = "docker compose -p envipath -f docker-compose.dev.yml down", help = "Stop PostgreSQL database" }

# Celery tasks
celery = { cmd = "celery -A envipath worker -l INFO -Q predict,model,background", help = "Start Celery worker for async task processing" }
celery-dev = { sequence = [
    "db-up",
    "celery",
], help = "Start database and Celery worker" }

# Frontend tasks
js-deps = { cmd = "uv run python scripts/pnpm_wrapper.py install", help = "Install frontend dependencies" }
db-up = { cmd = "docker compose -f docker-compose.dev.yml up -d", help = "Start PostgreSQL database using Docker Compose" }
db-down = { cmd = "docker compose -f docker-compose.dev.yml down", help = "Stop PostgreSQL database" }

# Full cleanup tasks
clean = { sequence = [
    "clean-db",
], help = "Remove model files and database volumes (WARNING: destroys all data!)" }
clean = { sequence = ["clean-db"], help = "Remove model files and database volumes (WARNING: destroys all data!)" }
clean-db = { cmd = "docker compose -f docker-compose.dev.yml down -v", help = "Removes the database container and volume." }

# Django tasks
migrate = { cmd = "uv run python manage.py migrate", help = "Run database migrations" }
migrate = { cmd = "python manage.py migrate", help = "Run database migrations" }
bootstrap = { shell = """
echo "Bootstrapping initial data..."
echo "This will take a bit ⏱️. Get yourself some coffee..."
uv run python manage.py bootstrap
python manage.py bootstrap
echo "✓ Bootstrap complete"
echo ""
echo "Default admin credentials:"
echo "  Username: admin"
echo "  Email: admin@envipath.com"
echo "  Password: SuperSafe"
""", help = "Bootstrap initial data (anonymous user, packages, models)" }
shell = { cmd = "uv run python manage.py shell", help = "Open Django shell" }

# Build tasks
build-frontend = { cmd = "uv run python scripts/pnpm_wrapper.py run build", help = "Build frontend assets using pnpm", deps = [
    "js-deps",
] }

collectstatic = { cmd = "uv run python manage.py collectstatic --noinput", help = "Collect static files for production", deps = [
    "build-frontend",
] }

frontend-test-setup = { cmd = "playwright install", help = "Install the browsers required for frontend testing" }

[tool.pytest.ini_options]
addopts = "--verbose --capture=no --durations=10"
testpaths = ["tests", "*/tests"]
pythonpath = ["."]
norecursedirs = [
    "env",
    "venv",
    "envipy-plugins",
    "envipy-additional-information",
    "envipy-ambit",
    "enviformer",
]
markers = [
    "api: API tests",
    "frontend: Frontend tests",
    "end2end: End-to-end tests",
    "slow: Slow tests",
]
""", help = "Bootstrap initial data (anonymous user, packages, models)" }
shell = { cmd = "python manage.py shell", help = "Open Django shell" }
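The markers list above registers the project's custom pytest markers; a minimal sketch of how a
test might opt into one (the test name, endpoint, and assertions are illustrative assumptions,
not taken from the diff; the client fixture is the standard one provided by pytest-django):

import pytest

# Select at runtime with e.g.: pytest -m api
@pytest.mark.api
def test_package_list_returns_ok(client):  # hypothetical test using the pytest-django client
    response = client.get("/api/packages/")
    assert response.status_code == 200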
Some files were not shown because too many files have changed in this diff