Compare commits
60 Commits
pre_frontp
...
58ab5b33e3
| Author | SHA1 | Date | |
|---|---|---|---|
| 58ab5b33e3 | |||
| 73f0202267 | |||
| 27c5bad9c5 | |||
| 5789f20e7f | |||
| c0cfdb9255 | |||
| 5da8dbc191 | |||
| dc18b73e08 | |||
| d80dfb5ee3 | |||
| 9f63a9d4de | |||
| 5565b9cb9e | |||
| ab0b5a5186 | |||
| f905bf21cf | |||
| 1fd993927c | |||
| 2a2fe4f147 | |||
| 5f5ae76182 | |||
| 1c2f70b3b9 | |||
| 54f8302104 | |||
| 6499a0c659 | |||
| 7c60a28801 | |||
| a4a4179261 | |||
| 6ee4ac535a | |||
| d6065ee888 | |||
| 9db4806d75 | |||
| 4bf20e62ef | |||
| 8adb93012a | |||
| d2d475b990 | |||
| 648ec150a9 | |||
| 46b0f1c124 | |||
| d5af898053 | |||
| b7379b3337 | |||
| d6440f416c | |||
| 901de4640c | |||
| 69df139256 | |||
| e8ae494c16 | |||
| fd2e2c2534 | |||
| 1a2c9bb543 | |||
| 7f6f209b4a | |||
| b6c35fea76 | |||
| fa8a191383 | |||
| 67b1baa5b0 | |||
| 89c194dcca | |||
| a8554c903c | |||
| d584791ee8 | |||
| e60052b05c | |||
| 3ff8d938d6 | |||
| a7f48c2cf9 | |||
| 39faab3d11 | |||
| 4e80cd63cd | |||
| 6592f0a68e | |||
| 21d30a923f | |||
| 12a20756d6 | |||
| d20a705011 | |||
| debbef8158 | |||
| 2799718951 | |||
| 305fdc41fb | |||
| 9deca8867e | |||
| df6056fb86 | |||
| c1553d9cd4 | |||
| 2b79adc2f7 | |||
| ddf1fd3515 |
@ -20,3 +20,16 @@ LOG_LEVEL='INFO'
|
||||
SERVER_URL='http://localhost:8000'
|
||||
PLUGINS_ENABLED=True
|
||||
EP_DATA_DIR='data'
|
||||
EMAIL_HOST_USER='admin@envipath.com'
|
||||
EMAIL_HOST_PASSWORD='dummy-password'
|
||||
|
||||
DEFAULT_FROM_EMAIL="test@test.com"
|
||||
SERVER_EMAIL='test@test.com'
|
||||
|
||||
# Testing settings VScode
|
||||
DJANGO_SETTINGS_MODULE='envipath.settings'
|
||||
MANAGE_PY_PATH='./manage.py'
|
||||
|
||||
APPLICABILITY_DOMAIN_ENABLED=True
|
||||
ENVIFORMER_PRESENT=True
|
||||
MODEL_BUILDING_ENABLED=True
|
||||
|
||||
67
.gitea/actions/setup-envipy/action.yaml
Normal file
@ -0,0 +1,67 @@
|
||||
name: 'Setup enviPy Environment'
|
||||
description: 'Shared setup for enviPy CI - installs dependencies and prepares environment'
|
||||
|
||||
inputs:
|
||||
skip-frontend:
|
||||
description: 'Skip frontend build steps (pnpm, tailwind)'
|
||||
required: false
|
||||
default: 'false'
|
||||
skip-playwright:
|
||||
description: 'Skip Playwright installation'
|
||||
required: false
|
||||
default: 'false'
|
||||
ssh-private-key:
|
||||
description: 'SSH private key for git access'
|
||||
required: true
|
||||
run-migrations:
|
||||
description: 'Run Django migrations after setup'
|
||||
required: false
|
||||
default: 'true'
|
||||
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Setup ssh
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir -p ~/.ssh
|
||||
echo "${{ inputs.ssh-private-key }}" > ~/.ssh/id_ed25519
|
||||
chmod 600 ~/.ssh/id_ed25519
|
||||
ssh-keyscan git.envipath.com >> ~/.ssh/known_hosts
|
||||
eval $(ssh-agent -s)
|
||||
ssh-add ~/.ssh/id_ed25519
|
||||
|
||||
- name: Setup Python venv
|
||||
shell: bash
|
||||
run: |
|
||||
uv sync --locked --all-extras --dev
|
||||
|
||||
- name: Install Playwright
|
||||
if: inputs.skip-playwright == 'false'
|
||||
shell: bash
|
||||
run: |
|
||||
source .venv/bin/activate
|
||||
playwright install --with-deps
|
||||
|
||||
- name: Build Frontend
|
||||
if: inputs.skip-frontend == 'false'
|
||||
shell: bash
|
||||
run: |
|
||||
uv run python scripts/pnpm_wrapper.py install
|
||||
uv run python scripts/pnpm_wrapper.py run build
|
||||
|
||||
- name: Wait for Postgres
|
||||
shell: bash
|
||||
run: |
|
||||
until pg_isready -h postgres -U ${{ env.POSTGRES_USER }}; do
|
||||
echo "Waiting for postgres..."
|
||||
sleep 2
|
||||
done
|
||||
echo "Postgres is ready!"
|
||||
|
||||
- name: Run Django Migrations
|
||||
if: inputs.run-migrations == 'true'
|
||||
shell: bash
|
||||
run: |
|
||||
source .venv/bin/activate
|
||||
python manage.py migrate --noinput
|
||||
53
.gitea/docker/Dockerfile.ci
Normal file
@ -0,0 +1,53 @@
|
||||
# Custom CI Docker image for Gitea runners
|
||||
# Pre-installs Node.js 24, pnpm 10, uv, and system dependencies
|
||||
# to eliminate setup time in CI workflows
|
||||
|
||||
FROM ubuntu:24.04
|
||||
|
||||
# Prevent interactive prompts during package installation
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
# Install system dependencies
|
||||
RUN apt-get update && \
|
||||
apt-get install -y \
|
||||
postgresql-client \
|
||||
redis-tools \
|
||||
openjdk-11-jre-headless \
|
||||
curl \
|
||||
ca-certificates \
|
||||
gnupg \
|
||||
lsb-release \
|
||||
git \
|
||||
ssh \
|
||||
libxrender1 \
|
||||
libxext6 \
|
||||
libfontconfig1 \
|
||||
libfreetype6 \
|
||||
libcairo2 \
|
||||
libglib2.0-0t64 \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install Node.js 24 via NodeSource
|
||||
RUN curl -fsSL https://deb.nodesource.com/setup_24.x | bash - && \
|
||||
apt-get install -y nodejs && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Enable corepack and install pnpm 10
|
||||
RUN corepack enable && \
|
||||
corepack prepare pnpm@10 --activate
|
||||
|
||||
# Install uv https://docs.astral.sh/uv/guides/integration/docker/#available-images
|
||||
COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/
|
||||
ENV PATH="/root/.cargo/bin:${PATH}"
|
||||
|
||||
# Verify installations
|
||||
RUN node --version && \
|
||||
npm --version && \
|
||||
pnpm --version && \
|
||||
uv --version && \
|
||||
pg_isready --version && \
|
||||
redis-cli --version && \
|
||||
java -version
|
||||
|
||||
# Set working directory
|
||||
WORKDIR /workspace
|
||||
86
.gitea/workflows/api-ci.yaml
Normal file
@ -0,0 +1,86 @@
|
||||
name: API CI
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- develop
|
||||
paths:
|
||||
- 'epapi/**'
|
||||
- 'epdb/models.py' # API depends on models
|
||||
- 'epdb/logic.py' # API depends on business logic
|
||||
- 'tests/fixtures/**' # API tests use fixtures
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
api-tests:
|
||||
if: ${{ !contains(gitea.event.pull_request.title, 'WIP') }}
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: git.envipath.com/envipath/envipy-ci:latest
|
||||
|
||||
services:
|
||||
postgres:
|
||||
image: postgres:16
|
||||
env:
|
||||
POSTGRES_USER: ${{ vars.POSTGRES_USER }}
|
||||
POSTGRES_PASSWORD: ${{ secrets.POSTGRES_PASSWORD }}
|
||||
POSTGRES_DB: ${{ vars.POSTGRES_DB }}
|
||||
ports:
|
||||
- ${{ vars.POSTGRES_PORT}}:5432
|
||||
options: >-
|
||||
--health-cmd="pg_isready -U postgres"
|
||||
--health-interval=10s
|
||||
--health-timeout=5s
|
||||
--health-retries=5
|
||||
|
||||
env:
|
||||
RUNNER_TOOL_CACHE: /toolcache
|
||||
EP_DATA_DIR: /opt/enviPy/
|
||||
ALLOWED_HOSTS: 127.0.0.1,localhost
|
||||
DEBUG: True
|
||||
LOG_LEVEL: INFO
|
||||
MODEL_BUILDING_ENABLED: True
|
||||
APPLICABILITY_DOMAIN_ENABLED: True
|
||||
ENVIFORMER_PRESENT: True
|
||||
ENVIFORMER_DEVICE: cpu
|
||||
FLAG_CELERY_PRESENT: False
|
||||
PLUGINS_ENABLED: True
|
||||
SERVER_URL: http://localhost:8000
|
||||
ADMIN_APPROVAL_REQUIRED: True
|
||||
REGISTRATION_MANDATORY: True
|
||||
LOG_DIR: ''
|
||||
# DB
|
||||
POSTGRES_SERVICE_NAME: postgres
|
||||
POSTGRES_DB: ${{ vars.POSTGRES_DB }}
|
||||
POSTGRES_USER: ${{ vars.POSTGRES_USER }}
|
||||
POSTGRES_PASSWORD: ${{ secrets.POSTGRES_PASSWORD }}
|
||||
POSTGRES_PORT: 5432
|
||||
# SENTRY
|
||||
SENTRY_ENABLED: False
|
||||
# MS ENTRA
|
||||
MS_ENTRA_ENABLED: False
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
# Use shared setup action - skips frontend builds for API-only tests
|
||||
- name: Setup enviPy Environment
|
||||
uses: ./.gitea/actions/setup-envipy
|
||||
with:
|
||||
skip-frontend: 'true'
|
||||
skip-playwright: 'false'
|
||||
ssh-private-key: ${{ secrets.ENVIPY_CI_PRIVATE_KEY }}
|
||||
run-migrations: 'true'
|
||||
|
||||
- name: Run API tests
|
||||
run: |
|
||||
.venv/bin/python manage.py test epapi -v 2
|
||||
|
||||
- name: Test API endpoints availability
|
||||
run: |
|
||||
.venv/bin/python manage.py runserver 0.0.0.0:8000 &
|
||||
SERVER_PID=$!
|
||||
sleep 5
|
||||
curl -f http://localhost:8000/api/v1/docs || echo "API docs not available"
|
||||
kill $SERVER_PID
|
||||
48
.gitea/workflows/build-ci-image.yaml
Normal file
@ -0,0 +1,48 @@
|
||||
name: Build CI Docker Image
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- develop
|
||||
- main
|
||||
paths:
|
||||
- '.gitea/docker/Dockerfile.ci'
|
||||
- '.gitea/workflows/build-ci-image.yaml'
|
||||
|
||||
jobs:
|
||||
build-and-push:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to container registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: git.envipath.com
|
||||
username: ${{ secrets.CI_REGISTRY_USER }}
|
||||
password: ${{ secrets.CI_REGISTRY_PASSWORD }}
|
||||
|
||||
- name: Extract metadata
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: git.envipath.com/envipath/envipy-ci
|
||||
tags: |
|
||||
type=raw,value=latest
|
||||
type=sha,prefix={{branch}}-
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
file: .gitea/docker/Dockerfile.ci
|
||||
push: true
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
cache-from: type=registry,ref=git.envipath.com/envipath/envipy-ci:latest
|
||||
cache-to: type=inline
|
||||
@ -8,7 +8,10 @@ on:
|
||||
|
||||
jobs:
|
||||
test:
|
||||
if: ${{ !contains(gitea.event.pull_request.title, 'WIP') }}
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: git.envipath.com/envipath/envipy-ci:latest
|
||||
|
||||
services:
|
||||
postgres:
|
||||
@ -40,7 +43,7 @@ jobs:
|
||||
EP_DATA_DIR: /opt/enviPy/
|
||||
ALLOWED_HOSTS: 127.0.0.1,localhost
|
||||
DEBUG: True
|
||||
LOG_LEVEL: DEBUG
|
||||
LOG_LEVEL: INFO
|
||||
MODEL_BUILDING_ENABLED: True
|
||||
APPLICABILITY_DOMAIN_ENABLED: True
|
||||
ENVIFORMER_PRESENT: True
|
||||
@ -63,54 +66,22 @@ jobs:
|
||||
MS_ENTRA_ENABLED: False
|
||||
|
||||
steps:
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install system tools via apt
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y postgresql-client redis-tools openjdk-11-jre-headless
|
||||
|
||||
- name: Setup ssh
|
||||
run: |
|
||||
echo "${{ secrets.ENVIPY_CI_PRIVATE_KEY }}" > ~/.ssh/id_ed25519
|
||||
chmod 600 ~/.ssh/id_ed25519
|
||||
ssh-keyscan git.envipath.com >> ~/.ssh/known_hosts
|
||||
eval $(ssh-agent -s)
|
||||
ssh-add ~/.ssh/id_ed25519
|
||||
|
||||
- name: Install pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
# Use shared setup action - includes all dependencies and migrations
|
||||
- name: Setup enviPy Environment
|
||||
uses: ./.gitea/actions/setup-envipy
|
||||
with:
|
||||
version: 10
|
||||
skip-frontend: 'false'
|
||||
skip-playwright: 'false'
|
||||
ssh-private-key: ${{ secrets.ENVIPY_CI_PRIVATE_KEY }}
|
||||
run-migrations: 'true'
|
||||
|
||||
- name: Use Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
cache: "pnpm"
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v6
|
||||
with:
|
||||
enable-cache: true
|
||||
|
||||
- name: Setup venv
|
||||
- name: Run frontend tests
|
||||
run: |
|
||||
uv sync --locked --all-extras --dev
|
||||
|
||||
- name: Wait for services
|
||||
run: |
|
||||
until pg_isready -h postgres -U postgres; do sleep 2; done
|
||||
# until redis-cli -h redis ping; do sleep 2; done
|
||||
|
||||
- name: Run Django migrations
|
||||
run: |
|
||||
source .venv/bin/activate
|
||||
python manage.py migrate --noinput
|
||||
.venv/bin/python manage.py test --tag frontend
|
||||
|
||||
- name: Run Django tests
|
||||
run: |
|
||||
source .venv/bin/activate
|
||||
python manage.py test tests --exclude-tag slow
|
||||
.venv/bin/python manage.py test tests --exclude-tag slow --exclude-tag frontend
|
||||
|
||||
369
.gitignore
vendored
@ -1,12 +1,375 @@
|
||||
*.pyc
|
||||
|
||||
|
||||
|
||||
### Python ###
|
||||
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[codz]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py.cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
cover/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
.idea/
|
||||
db.sqlite3-journal
|
||||
static/admin/
|
||||
static/django_extensions/
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
.pybuilder/
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
|
||||
# pdm
|
||||
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
||||
# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
|
||||
# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
|
||||
# pdm.lock
|
||||
# pdm.toml
|
||||
.pdm-python
|
||||
.pdm-build/
|
||||
|
||||
# pixi
|
||||
# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
|
||||
# pixi.lock
|
||||
# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
|
||||
# in the .venv directory. It is recommended not to include this directory in version control.
|
||||
.pixi
|
||||
|
||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
||||
__pypackages__/
|
||||
|
||||
# Celery stuff
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
|
||||
# Redis
|
||||
*.rdb
|
||||
*.aof
|
||||
*.pid
|
||||
|
||||
# RabbitMQ
|
||||
mnesia/
|
||||
rabbitmq/
|
||||
rabbitmq-data/
|
||||
|
||||
# ActiveMQ
|
||||
activemq-data/
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.envrc
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
|
||||
# pytype static type analyzer
|
||||
.pytype/
|
||||
|
||||
# Cython debug symbols
|
||||
cython_debug/
|
||||
|
||||
# PyCharm
|
||||
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
||||
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
||||
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||
.idea/
|
||||
|
||||
# Abstra
|
||||
# Abstra is an AI-powered process automation framework.
|
||||
# Ignore directories containing user credentials, local state, and settings.
|
||||
# Learn more at https://abstra.io/docs
|
||||
.abstra/
|
||||
|
||||
# Visual Studio Code
|
||||
# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
|
||||
# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
|
||||
# and can be added to the global gitignore or merged into this file. However, if you prefer,
|
||||
# you could uncomment the following to ignore the entire vscode folder
|
||||
.vscode/
|
||||
*.code-workspace
|
||||
|
||||
# Ruff stuff:
|
||||
.ruff_cache/
|
||||
|
||||
# UV cache
|
||||
.uv-cache/
|
||||
|
||||
# PyPI configuration file
|
||||
.pypirc
|
||||
|
||||
# Marimo
|
||||
marimo/_static/
|
||||
marimo/_lsp/
|
||||
__marimo__/
|
||||
|
||||
# Streamlit
|
||||
.streamlit/secrets.toml
|
||||
|
||||
### Agents ###
|
||||
.claude/
|
||||
.codex/
|
||||
.cursor/
|
||||
.github/prompts/
|
||||
.junie/
|
||||
.windsurf/
|
||||
|
||||
AGENTS.md
|
||||
CLAUDE.md
|
||||
GEMINI.md
|
||||
.aider.*
|
||||
|
||||
### Node.js ###
|
||||
|
||||
# Logs
|
||||
logs
|
||||
*.log
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
lerna-debug.log*
|
||||
|
||||
# Diagnostic reports (https://nodejs.org/api/report.html)
|
||||
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
|
||||
|
||||
# Runtime data
|
||||
pids
|
||||
*.pid
|
||||
*.seed
|
||||
*.pid.lock
|
||||
|
||||
# Directory for instrumented libs generated by jscoverage/JSCover
|
||||
lib-cov
|
||||
|
||||
# Coverage directory used by tools like istanbul
|
||||
coverage
|
||||
*.lcov
|
||||
|
||||
# nyc test coverage
|
||||
.nyc_output
|
||||
|
||||
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
|
||||
.grunt
|
||||
|
||||
# Bower dependency directory (https://bower.io/)
|
||||
bower_components
|
||||
|
||||
# node-waf configuration
|
||||
.lock-wscript
|
||||
|
||||
# Compiled binary addons (https://nodejs.org/api/addons.html)
|
||||
build/Release
|
||||
|
||||
# Dependency directories
|
||||
node_modules/
|
||||
jspm_packages/
|
||||
|
||||
# Snowpack dependency directory (https://snowpack.dev/)
|
||||
web_modules/
|
||||
|
||||
# TypeScript cache
|
||||
*.tsbuildinfo
|
||||
|
||||
# Optional npm cache directory
|
||||
.npm
|
||||
|
||||
# Optional eslint cache
|
||||
.eslintcache
|
||||
|
||||
# Optional stylelint cache
|
||||
.stylelintcache
|
||||
|
||||
# Optional REPL history
|
||||
.node_repl_history
|
||||
|
||||
# Output of 'npm pack'
|
||||
*.tgz
|
||||
|
||||
# Yarn Integrity file
|
||||
.yarn-integrity
|
||||
|
||||
# dotenv environment variable files
|
||||
.env
|
||||
.env.*
|
||||
!.env.example
|
||||
|
||||
# parcel-bundler cache (https://parceljs.org/)
|
||||
.cache
|
||||
.parcel-cache
|
||||
|
||||
# Next.js build output
|
||||
.next
|
||||
out
|
||||
|
||||
# Nuxt.js build / generate output
|
||||
.nuxt
|
||||
dist
|
||||
.output
|
||||
|
||||
# Gatsby files
|
||||
.cache/
|
||||
# Comment in the public line in if your project uses Gatsby and not Next.js
|
||||
# https://nextjs.org/blog/next-9-1#public-directory-support
|
||||
# public
|
||||
|
||||
# vuepress build output
|
||||
.vuepress/dist
|
||||
|
||||
# vuepress v2.x temp and cache directory
|
||||
.temp
|
||||
.cache
|
||||
|
||||
# Sveltekit cache directory
|
||||
.svelte-kit/
|
||||
|
||||
# vitepress build output
|
||||
**/.vitepress/dist
|
||||
|
||||
# vitepress cache directory
|
||||
**/.vitepress/cache
|
||||
|
||||
# Docusaurus cache and generated files
|
||||
.docusaurus
|
||||
|
||||
# Serverless directories
|
||||
.serverless/
|
||||
|
||||
# FuseBox cache
|
||||
.fusebox/
|
||||
|
||||
# DynamoDB Local files
|
||||
.dynamodb/
|
||||
|
||||
# Firebase cache directory
|
||||
.firebase/
|
||||
|
||||
# TernJS port file
|
||||
.tern-port
|
||||
|
||||
# Stores VSCode versions used for testing VSCode extensions
|
||||
.vscode-test
|
||||
|
||||
# yarn v3
|
||||
.pnp.*
|
||||
.yarn/*
|
||||
!.yarn/patches
|
||||
!.yarn/plugins
|
||||
!.yarn/releases
|
||||
!.yarn/sdks
|
||||
!.yarn/versions
|
||||
|
||||
# Vite files
|
||||
vite.config.js.timestamp-*
|
||||
vite.config.ts.timestamp-*
|
||||
.vite/
|
||||
|
||||
### Custom ###
|
||||
|
||||
debug.log
|
||||
scratches/
|
||||
|
||||
test-results/
|
||||
data/
|
||||
*.arff
|
||||
|
||||
# Auto generated
|
||||
static/css/output.css
|
||||
|
||||
# macOS system files
|
||||
.DS_Store
|
||||
.Trashes
|
||||
._*
|
||||
|
||||
@ -8,6 +8,7 @@ repos:
|
||||
- id: end-of-file-fixer
|
||||
- id: check-yaml
|
||||
- id: check-added-large-files
|
||||
exclude: ^static/images/
|
||||
|
||||
- repo: https://github.com/astral-sh/ruff-pre-commit
|
||||
rev: v0.13.3
|
||||
@ -20,6 +21,15 @@ repos:
|
||||
- id: ruff-format
|
||||
types_or: [python, pyi]
|
||||
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: prettier-jinja-templates
|
||||
name: Format Jinja templates with Prettier
|
||||
entry: pnpm exec prettier --plugin=prettier-plugin-jinja-template --parser=jinja-template --write
|
||||
language: system
|
||||
types: [file]
|
||||
files: ^templates/.*\.html$
|
||||
|
||||
# - repo: local
|
||||
# hooks:
|
||||
# - id: django-check
|
||||
|
||||
11
.prettierrc.json
Normal file
@ -0,0 +1,11 @@
|
||||
{
|
||||
"plugins": ["prettier-plugin-jinja-template", "prettier-plugin-tailwindcss"],
|
||||
"overrides": [
|
||||
{
|
||||
"files": "templates/**/*.html",
|
||||
"options": {
|
||||
"parser": "jinja-template"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
74
README.md
@ -7,11 +7,12 @@ These instructions will guide you through setting up the project for local devel
|
||||
### Prerequisites
|
||||
|
||||
- Python 3.11 or later
|
||||
- [uv](https://github.com/astral-sh/uv) - A fast Python package installer and resolver.
|
||||
- **Docker and Docker Compose** - Required for running the PostgreSQL database.
|
||||
- [uv](https://github.com/astral-sh/uv) - Python package manager
|
||||
- **Docker and Docker Compose** - Required for running PostgreSQL database and Redis (for async Celery tasks)
|
||||
- Git
|
||||
- Make
|
||||
|
||||
> **Note:** This application requires PostgreSQL, which uses `ArrayField`. Docker is the recommended way to run PostgreSQL locally.
|
||||
> **Note:** This application requires PostgreSQL (uses `ArrayField`). Docker is the easiest way to run PostgreSQL locally.
|
||||
|
||||
### 1. Install Dependencies
|
||||
|
||||
@ -23,7 +24,12 @@ Then, sync the project dependencies. This will create a virtual environment in `
|
||||
uv sync --dev
|
||||
```
|
||||
|
||||
> **Note on RDkit:** If you have a different version of rdkit installed globally, the dependency installation may fail. If this happens, please uninstall the global version and run `uv sync` again.
|
||||
Note on RDkit installation: if you have rdkit installed on your system globally with a different version of python, the installation will try to link against that and subsequent calls fail. Only option remove global rdkit and rerun sync.
|
||||
|
||||
---
|
||||
|
||||
The frontend requires `pnpm` to correctly display in development.
|
||||
[Install it here](https://pnpm.io/installation).
|
||||
|
||||
### 2. Set Up Environment File
|
||||
|
||||
@ -44,6 +50,7 @@ uv run poe setup
|
||||
```
|
||||
|
||||
This single command will:
|
||||
|
||||
1. Start the PostgreSQL database using Docker Compose.
|
||||
2. Run database migrations.
|
||||
3. Bootstrap initial data (anonymous user, default packages, models).
|
||||
@ -54,9 +61,12 @@ After setup, start the development server:
|
||||
uv run poe dev
|
||||
```
|
||||
|
||||
This will start the css-watcher as well as the django-development server,
|
||||
The application will be available at `http://localhost:8000`.
|
||||
|
||||
#### Other useful Poe commands:
|
||||
**Note:** The development server automatically starts a CSS watcher (`pnpm run dev`) alongside the Django server to rebuild CSS files when changes are detected. This ensures your styles are always up-to-date during development.
|
||||
|
||||
#### Other useful Poe commands
|
||||
|
||||
You can list all available commands by running `uv run poe --help`.
|
||||
|
||||
@ -66,26 +76,50 @@ uv run poe db-down # Stop PostgreSQL
|
||||
uv run poe migrate # Run migrations only
|
||||
uv run poe bootstrap # Bootstrap data only
|
||||
uv run poe shell # Open the Django shell
|
||||
uv run poe build # Build frontend assets and collect static files
|
||||
uv run poe clean # Remove database volumes (WARNING: destroys all data)
|
||||
uv run poe celery # Start Celery worker for async task processing
|
||||
uv run poe celery-dev # Start database and Celery worker
|
||||
```
|
||||
|
||||
### 4. Async Celery Setup (Optional)
|
||||
|
||||
By default, Celery tasks run synchronously (`CELERY_TASK_ALWAYS_EAGER = True`), which means prediction tasks block the HTTP request until completion. To enable asynchronous task processing with live status updates on pathway pages:
|
||||
|
||||
1. **Set the Celery flag in your `.env` file:**
|
||||
|
||||
```bash
|
||||
FLAG_CELERY_PRESENT=True
|
||||
```
|
||||
|
||||
2. **Start Redis and Celery worker:**
|
||||
|
||||
```bash
|
||||
uv run poe celery-dev
|
||||
```
|
||||
|
||||
3. **Start the development server** (in another terminal):
|
||||
```bash
|
||||
uv run poe dev
|
||||
```
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
* **Docker Connection Error:** If you see an error like `open //./pipe/dockerDesktopLinuxEngine: The system cannot find the file specified` (on Windows), it likely means your Docker Desktop application is not running. Please start Docker Desktop and try the command again.
|
||||
- **Docker Connection Error:** If you see an error like `open //./pipe/dockerDesktopLinuxEngine: The system cannot find the file specified` (on Windows), it likely means your Docker Desktop application is not running. Please start Docker Desktop and try the command again.
|
||||
|
||||
* **SSH Keys for Git Dependencies:** Some dependencies are installed from private git repositories and require SSH authentication. Ensure your SSH keys are configured correctly for Git.
|
||||
* For a general guide, see [GitHub's official documentation](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent).
|
||||
* **Windows Users:** If `uv sync` hangs while fetching git dependencies, you may need to explicitly configure Git to use the Windows OpenSSH client and use the `ssh-agent` to manage your key's passphrase.
|
||||
- **SSH Keys for Git Dependencies:** Some dependencies are installed from private git repositories and require SSH authentication. Ensure your SSH keys are configured correctly for Git.
|
||||
- For a general guide, see [GitHub's official documentation](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent).
|
||||
- **Windows Users:** If `uv sync` hangs while fetching git dependencies, you may need to explicitly configure Git to use the Windows OpenSSH client and use the `ssh-agent` to manage your key's passphrase.
|
||||
1. **Point Git to the correct SSH executable:**
|
||||
```powershell
|
||||
git config --global core.sshCommand "C:/Windows/System32/OpenSSH/ssh.exe"
|
||||
```
|
||||
2. **Enable and use the SSH agent:**
|
||||
|
||||
1. **Point Git to the correct SSH executable:**
|
||||
```powershell
|
||||
git config --global core.sshCommand "C:/Windows/System32/OpenSSH/ssh.exe"
|
||||
```
|
||||
2. **Enable and use the SSH agent:**
|
||||
```powershell
|
||||
# Run these commands in an administrator PowerShell
|
||||
Get-Service ssh-agent | Set-Service -StartupType Automatic -PassThru | Start-Service
|
||||
```powershell
|
||||
# Run these commands in an administrator PowerShell
|
||||
Get-Service ssh-agent | Set-Service -StartupType Automatic -PassThru | Start-Service
|
||||
|
||||
# Add your key to the agent. It will prompt for the passphrase once.
|
||||
ssh-add
|
||||
```
|
||||
# Add your key to the agent. It will prompt for the passphrase once.
|
||||
ssh-add
|
||||
```
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
services:
|
||||
db:
|
||||
image: postgres:15
|
||||
image: postgres:18
|
||||
container_name: envipath-postgres
|
||||
environment:
|
||||
POSTGRES_USER: postgres
|
||||
@ -9,12 +9,18 @@ services:
|
||||
ports:
|
||||
- "5432:5432"
|
||||
volumes:
|
||||
- postgres_data:/var/lib/postgresql/data
|
||||
- postgres_data:/var/lib/postgresql
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U postgres"]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
redis:
|
||||
image: redis:7-alpine
|
||||
container_name: envipath-redis
|
||||
ports:
|
||||
- "6379:6379"
|
||||
|
||||
volumes:
|
||||
postgres_data:
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
from epdb.api import router as epdb_app_router
|
||||
from epapi.v1.router import router as v1_router # Refactored API from epdb.api_v2
|
||||
from epdb.legacy_api import router as epdb_legacy_app_router
|
||||
from ninja import NinjaAPI
|
||||
|
||||
@ -8,5 +8,5 @@ api_v1 = NinjaAPI(title="API V1 Docs", urls_namespace="api-v1")
|
||||
api_legacy = NinjaAPI(title="Legacy API Docs", urls_namespace="api-legacy")
|
||||
|
||||
# Add routers
|
||||
api_v1.add_router("/", epdb_app_router)
|
||||
api_v1.add_router("/", v1_router)
|
||||
api_legacy.add_router("/", epdb_legacy_app_router)
|
||||
|
||||
@ -21,7 +21,9 @@ from sklearn.tree import DecisionTreeClassifier
|
||||
# Build paths inside the project like this: BASE_DIR / 'subdir'.
|
||||
BASE_DIR = Path(__file__).resolve().parent.parent
|
||||
|
||||
load_dotenv(BASE_DIR / ".env", override=False)
|
||||
ENV_PATH = os.environ.get("ENV_PATH", BASE_DIR / ".env")
|
||||
print(f"Loading env from {ENV_PATH}")
|
||||
load_dotenv(ENV_PATH, override=False)
|
||||
|
||||
# Quick-start development settings - unsuitable for production
|
||||
# See https://docs.djangoproject.com/en/4.2/howto/deployment/checklist/
|
||||
@ -48,10 +50,25 @@ INSTALLED_APPS = [
|
||||
"django_extensions",
|
||||
"oauth2_provider",
|
||||
# Custom
|
||||
"epapi", # API endpoints (v1, etc.)
|
||||
"epdb",
|
||||
"migration",
|
||||
]
|
||||
|
||||
TENANT = os.environ.get("TENANT", "public")
|
||||
|
||||
if TENANT != "public":
|
||||
INSTALLED_APPS.append(TENANT)
|
||||
|
||||
EPDB_PACKAGE_MODEL = os.environ.get("EPDB_PACKAGE_MODEL", "epdb.Package")
|
||||
|
||||
|
||||
def GET_PACKAGE_MODEL():
|
||||
from django.apps import apps
|
||||
|
||||
return apps.get_model(EPDB_PACKAGE_MODEL)
|
||||
|
||||
|
||||
AUTHENTICATION_BACKENDS = [
|
||||
"django.contrib.auth.backends.ModelBackend",
|
||||
]
|
||||
@ -87,12 +104,13 @@ TEMPLATES = [
|
||||
"django.template.context_processors.request",
|
||||
"django.contrib.auth.context_processors.auth",
|
||||
"django.contrib.messages.context_processors.messages",
|
||||
"epdb.context_processors.package_context",
|
||||
],
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
ALLOWED_HTML_TAGS = {'b', 'i', 'u', 'br', 'em', 'mark', 'p', 's', 'strong'}
|
||||
ALLOWED_HTML_TAGS = {"b", "i", "u", "br", "em", "mark", "p", "s", "strong"}
|
||||
|
||||
WSGI_APPLICATION = "envipath.wsgi.application"
|
||||
|
||||
@ -183,6 +201,12 @@ if not os.path.exists(LOG_DIR):
|
||||
os.mkdir(LOG_DIR)
|
||||
|
||||
PLUGIN_DIR = os.path.join(EP_DATA_DIR, "plugins")
|
||||
|
||||
API_PAGINATION_DEFAULT_PAGE_SIZE = int(os.environ.get("API_PAGINATION_DEFAULT_PAGE_SIZE", 50))
|
||||
PAGINATION_MAX_PER_PAGE_SIZE = int(
|
||||
os.environ.get("API_PAGINATION_MAX_PAGE_SIZE", 100)
|
||||
) # Ninja override
|
||||
|
||||
if not os.path.exists(PLUGIN_DIR):
|
||||
os.mkdir(PLUGIN_DIR)
|
||||
|
||||
@ -245,6 +269,7 @@ LOGGING = {
|
||||
ENVIFORMER_PRESENT = os.environ.get("ENVIFORMER_PRESENT", "False") == "True"
|
||||
ENVIFORMER_DEVICE = os.environ.get("ENVIFORMER_DEVICE", "cpu")
|
||||
|
||||
|
||||
# If celery is not present set always eager to true which will cause delayed tasks to block until finished
|
||||
FLAG_CELERY_PRESENT = os.environ.get("FLAG_CELERY_PRESENT", "False") == "True"
|
||||
if not FLAG_CELERY_PRESENT:
|
||||
@ -339,12 +364,21 @@ FLAGS = {
|
||||
# -> /password_reset/done is covered as well
|
||||
LOGIN_EXEMPT_URLS = [
|
||||
"/register",
|
||||
"/api/v1/", # Let API handle its own authentication
|
||||
"/api/legacy/",
|
||||
"/o/token/",
|
||||
"/o/userinfo/",
|
||||
"/password_reset/",
|
||||
"/reset/",
|
||||
"/microsoft/",
|
||||
"/terms",
|
||||
"/privacy",
|
||||
"/cookie-policy",
|
||||
"/about",
|
||||
"/contact",
|
||||
"/careers",
|
||||
"/cite",
|
||||
"/legal",
|
||||
]
|
||||
|
||||
# MS AD/Entra
|
||||
|
||||
@ -23,12 +23,20 @@ from .api import api_v1, api_legacy
|
||||
|
||||
urlpatterns = [
|
||||
path("", include("epdb.urls")),
|
||||
path("", include("migration.urls")),
|
||||
path("admin/", admin.site.urls),
|
||||
path("api/v1/", api_v1.urls),
|
||||
path("api/legacy/", api_legacy.urls),
|
||||
path("o/", include("oauth2_provider.urls", namespace="oauth2_provider")),
|
||||
]
|
||||
|
||||
if "migration" in s.INSTALLED_APPS:
|
||||
urlpatterns.append(path("", include("migration.urls")))
|
||||
|
||||
if s.MS_ENTRA_ENABLED:
|
||||
urlpatterns.append(path("", include("epauth.urls")))
|
||||
|
||||
# Custom error handlers
|
||||
handler400 = "epdb.views.handler400"
|
||||
handler403 = "epdb.views.handler403"
|
||||
handler404 = "epdb.views.handler404"
|
||||
handler500 = "epdb.views.handler500"
|
||||
|
||||
0
epapi/__init__.py
Normal file
6
epapi/apps.py
Normal file
@ -0,0 +1,6 @@
|
||||
from django.apps import AppConfig
|
||||
|
||||
|
||||
class EpapiConfig(AppConfig):
|
||||
default_auto_field = "django.db.models.BigAutoField"
|
||||
name = "epapi"
|
||||
0
epapi/migrations/__init__.py
Normal file
1
epapi/tests/__init__.py
Normal file
@ -0,0 +1 @@
|
||||
# Tests for epapi app
|
||||
1
epapi/tests/utils/__init__.py
Normal file
@ -0,0 +1 @@
|
||||
"""Tests for epapi utility modules."""
|
||||
218
epapi/tests/utils/test_validation_errors.py
Normal file
@ -0,0 +1,218 @@
|
||||
"""
|
||||
Tests for validation error utilities.
|
||||
|
||||
Tests the format_validation_error() and handle_validation_error() functions
|
||||
that transform Pydantic validation errors into user-friendly messages.
|
||||
"""
|
||||
|
||||
from django.test import TestCase, tag
|
||||
import json
|
||||
from pydantic import BaseModel, ValidationError, field_validator
|
||||
from typing import Literal
|
||||
|
||||
from ninja.errors import HttpError
|
||||
from epapi.utils.validation_errors import format_validation_error, handle_validation_error
|
||||
|
||||
|
||||
@tag("api", "utils")
|
||||
class ValidationErrorUtilityTests(TestCase):
|
||||
"""Test validation error utility functions."""
|
||||
|
||||
def test_format_missing_field_error(self):
|
||||
"""Test formatting of missing required field error."""
|
||||
|
||||
# Create a model with required field
|
||||
class TestModel(BaseModel):
|
||||
required_field: str
|
||||
|
||||
# Trigger validation error
|
||||
try:
|
||||
TestModel()
|
||||
except ValidationError as e:
|
||||
errors = e.errors()
|
||||
self.assertEqual(len(errors), 1)
|
||||
formatted = format_validation_error(errors[0])
|
||||
self.assertEqual(formatted, "This field is required")
|
||||
|
||||
def test_format_enum_error(self):
|
||||
"""Test formatting of enum validation error."""
|
||||
|
||||
class TestModel(BaseModel):
|
||||
status: Literal["active", "inactive"]
|
||||
|
||||
try:
|
||||
TestModel(status="invalid")
|
||||
except ValidationError as e:
|
||||
errors = e.errors()
|
||||
self.assertEqual(len(errors), 1)
|
||||
formatted = format_validation_error(errors[0])
|
||||
# Literal errors get formatted as "Please enter ..." with the valid options
|
||||
self.assertIn("Please enter", formatted)
|
||||
self.assertIn("active", formatted)
|
||||
self.assertIn("inactive", formatted)
|
||||
|
||||
def test_format_type_errors(self):
|
||||
"""Test formatting of type validation errors (string, int, float)."""
|
||||
test_cases = [
|
||||
# (field_type, invalid_value, expected_message)
|
||||
# Note: We don't check exact error_type as Pydantic may use different types
|
||||
# (e.g., int_type vs int_parsing) but we verify the formatted message is correct
|
||||
(str, 123, "Please enter a valid string"),
|
||||
(int, "not_a_number", "Please enter a valid int"),
|
||||
(float, "not_a_float", "Please enter a valid float"),
|
||||
]
|
||||
|
||||
for field_type, invalid_value, expected_message in test_cases:
|
||||
with self.subTest(field_type=field_type.__name__):
|
||||
|
||||
class TestModel(BaseModel):
|
||||
field: field_type
|
||||
|
||||
try:
|
||||
TestModel(field=invalid_value)
|
||||
except ValidationError as e:
|
||||
errors = e.errors()
|
||||
self.assertEqual(len(errors), 1)
|
||||
formatted = format_validation_error(errors[0])
|
||||
self.assertEqual(formatted, expected_message)
|
||||
|
||||
def test_format_value_error(self):
|
||||
"""Test formatting of value error from custom validator."""
|
||||
|
||||
class TestModel(BaseModel):
|
||||
age: int
|
||||
|
||||
@field_validator("age")
|
||||
@classmethod
|
||||
def validate_age(cls, v):
|
||||
if v < 0:
|
||||
raise ValueError("Age must be positive")
|
||||
return v
|
||||
|
||||
try:
|
||||
TestModel(age=-5)
|
||||
except ValidationError as e:
|
||||
errors = e.errors()
|
||||
self.assertEqual(len(errors), 1)
|
||||
formatted = format_validation_error(errors[0])
|
||||
self.assertEqual(formatted, "Age must be positive")
|
||||
|
||||
def test_format_unknown_error_type_fallback(self):
|
||||
"""Test that unknown error types fall back to default formatting."""
|
||||
# Mock an error with an unknown type
|
||||
mock_error = {
|
||||
"type": "unknown_custom_type",
|
||||
"msg": "Input should be a valid email address",
|
||||
"ctx": {},
|
||||
}
|
||||
|
||||
formatted = format_validation_error(mock_error)
|
||||
# Should use the else branch which does replacements on the message
|
||||
self.assertEqual(formatted, "Please enter a valid email address")
|
||||
|
||||
def test_handle_validation_error_structure(self):
|
||||
"""Test that handle_validation_error raises HttpError with correct structure."""
|
||||
|
||||
class TestModel(BaseModel):
|
||||
name: str
|
||||
count: int
|
||||
|
||||
try:
|
||||
TestModel(name=123, count="invalid")
|
||||
except ValidationError as e:
|
||||
# handle_validation_error should raise HttpError
|
||||
with self.assertRaises(HttpError) as context:
|
||||
handle_validation_error(e)
|
||||
|
||||
http_error = context.exception
|
||||
self.assertEqual(http_error.status_code, 400)
|
||||
|
||||
# Parse the JSON from the error message
|
||||
error_data = json.loads(http_error.message)
|
||||
|
||||
# Check structure
|
||||
self.assertEqual(error_data["type"], "validation_error")
|
||||
self.assertIn("field_errors", error_data)
|
||||
self.assertIn("message", error_data)
|
||||
self.assertEqual(error_data["message"], "Please correct the errors below")
|
||||
|
||||
# Check that both fields have errors
|
||||
self.assertIn("name", error_data["field_errors"])
|
||||
self.assertIn("count", error_data["field_errors"])
|
||||
|
||||
def test_handle_validation_error_no_pydantic_internals(self):
|
||||
"""Test that handle_validation_error doesn't expose Pydantic internals."""
|
||||
|
||||
class TestModel(BaseModel):
|
||||
email: str
|
||||
|
||||
try:
|
||||
TestModel(email=123)
|
||||
except ValidationError as e:
|
||||
with self.assertRaises(HttpError) as context:
|
||||
handle_validation_error(e)
|
||||
|
||||
http_error = context.exception
|
||||
error_data = json.loads(http_error.message)
|
||||
error_str = json.dumps(error_data)
|
||||
|
||||
# Ensure no Pydantic internals are exposed
|
||||
self.assertNotIn("pydantic", error_str.lower())
|
||||
self.assertNotIn("https://errors.pydantic.dev", error_str)
|
||||
self.assertNotIn("loc", error_str)
|
||||
|
||||
def test_handle_validation_error_user_friendly_messages(self):
|
||||
"""Test that all error messages are user-friendly."""
|
||||
|
||||
class TestModel(BaseModel):
|
||||
name: str
|
||||
age: int
|
||||
status: Literal["active", "inactive"]
|
||||
|
||||
try:
|
||||
TestModel(name=123, status="invalid") # Multiple errors
|
||||
except ValidationError as e:
|
||||
with self.assertRaises(HttpError) as context:
|
||||
handle_validation_error(e)
|
||||
|
||||
http_error = context.exception
|
||||
error_data = json.loads(http_error.message)
|
||||
|
||||
# All messages should be user-friendly (contain "Please" or "This field")
|
||||
for field, messages in error_data["field_errors"].items():
|
||||
for message in messages:
|
||||
# User-friendly messages start with "Please" or "This field"
|
||||
self.assertTrue(
|
||||
message.startswith("Please") or message.startswith("This field"),
|
||||
f"Message '{message}' is not user-friendly",
|
||||
)
|
||||
|
||||
def test_handle_validation_error_multiple_errors_same_field(self):
|
||||
"""Test handling multiple validation errors for the same field."""
|
||||
|
||||
class TestModel(BaseModel):
|
||||
value: int
|
||||
|
||||
@field_validator("value")
|
||||
@classmethod
|
||||
def validate_range(cls, v):
|
||||
if v < 0:
|
||||
raise ValueError("Must be non-negative")
|
||||
if v > 100:
|
||||
raise ValueError("Must be at most 100")
|
||||
return v
|
||||
|
||||
# Test with string (type error) - this will fail before the validator runs
|
||||
try:
|
||||
TestModel(value="invalid")
|
||||
except ValidationError as e:
|
||||
with self.assertRaises(HttpError) as context:
|
||||
handle_validation_error(e)
|
||||
|
||||
http_error = context.exception
|
||||
error_data = json.loads(http_error.message)
|
||||
|
||||
# Should have error for 'value' field
|
||||
self.assertIn("value", error_data["field_errors"])
|
||||
self.assertIsInstance(error_data["field_errors"]["value"], list)
|
||||
self.assertGreater(len(error_data["field_errors"]["value"]), 0)
|
||||
1
epapi/tests/v1/__init__.py
Normal file
@ -0,0 +1 @@
|
||||
# Tests for epapi v1 API
|
||||
448
epapi/tests/v1/test_additional_information.py
Normal file
@ -0,0 +1,448 @@
|
||||
"""
|
||||
Tests for Additional Information API endpoints.
|
||||
|
||||
Tests CRUD operations on scenario additional information including the new PATCH endpoint.
|
||||
"""
|
||||
|
||||
from django.test import TestCase, tag
|
||||
import json
|
||||
from uuid import uuid4
|
||||
|
||||
from epdb.logic import PackageManager, UserManager
|
||||
from epdb.models import Scenario
|
||||
|
||||
|
||||
@tag("api", "additional_information")
|
||||
class AdditionalInformationAPITests(TestCase):
|
||||
"""Test additional information API endpoints."""
|
||||
|
||||
@classmethod
|
||||
def setUpTestData(cls):
|
||||
"""Set up test data: user, package, and scenario."""
|
||||
cls.user = UserManager.create_user(
|
||||
"ai-test-user",
|
||||
"ai-test@envipath.com",
|
||||
"SuperSafe",
|
||||
set_setting=False,
|
||||
add_to_group=False,
|
||||
is_active=True,
|
||||
)
|
||||
cls.other_user = UserManager.create_user(
|
||||
"ai-other-user",
|
||||
"ai-other@envipath.com",
|
||||
"SuperSafe",
|
||||
set_setting=False,
|
||||
add_to_group=False,
|
||||
is_active=True,
|
||||
)
|
||||
cls.package = PackageManager.create_package(
|
||||
cls.user, "AI Test Package", "Test package for additional information"
|
||||
)
|
||||
# Package owned by other_user (no access for cls.user)
|
||||
cls.other_package = PackageManager.create_package(
|
||||
cls.other_user, "Other Package", "Package without access"
|
||||
)
|
||||
# Create a scenario for testing
|
||||
cls.scenario = Scenario.objects.create(
|
||||
package=cls.package,
|
||||
name="Test Scenario",
|
||||
description="Test scenario for additional information tests",
|
||||
scenario_type="biodegradation",
|
||||
scenario_date="2024-01-01",
|
||||
additional_information={}, # Initialize with empty dict
|
||||
)
|
||||
cls.other_scenario = Scenario.objects.create(
|
||||
package=cls.other_package,
|
||||
name="Other Scenario",
|
||||
description="Scenario in package without access",
|
||||
scenario_type="biodegradation",
|
||||
scenario_date="2024-01-01",
|
||||
additional_information={},
|
||||
)
|
||||
|
||||
def test_list_all_schemas(self):
|
||||
"""Test GET /api/v1/information/schema/ returns all schemas."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
response = self.client.get("/api/v1/information/schema/")
|
||||
|
||||
self.assertEqual(response.status_code, 200)
|
||||
data = response.json()
|
||||
self.assertIsInstance(data, dict)
|
||||
# Should have multiple schemas
|
||||
self.assertGreater(len(data), 0)
|
||||
# Each schema should have RJSF format
|
||||
for name, schema in data.items():
|
||||
self.assertIn("schema", schema)
|
||||
self.assertIn("uiSchema", schema)
|
||||
self.assertIn("formData", schema)
|
||||
self.assertIn("groups", schema)
|
||||
|
||||
def test_get_specific_schema(self):
|
||||
"""Test GET /api/v1/information/schema/{model_name}/ returns specific schema."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# Assuming 'temperature' is a valid model
|
||||
response = self.client.get("/api/v1/information/schema/temperature/")
|
||||
|
||||
self.assertEqual(response.status_code, 200)
|
||||
data = response.json()
|
||||
self.assertIn("schema", data)
|
||||
self.assertIn("uiSchema", data)
|
||||
|
||||
def test_get_nonexistent_schema_returns_404(self):
|
||||
"""Test GET for non-existent schema returns 404."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
response = self.client.get("/api/v1/information/schema/nonexistent/")
|
||||
|
||||
self.assertEqual(response.status_code, 404)
|
||||
|
||||
def test_list_scenario_information_empty(self):
|
||||
"""Test GET /api/v1/scenario/{uuid}/information/ returns empty list initially."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
response = self.client.get(f"/api/v1/scenario/{self.scenario.uuid}/information/")
|
||||
|
||||
self.assertEqual(response.status_code, 200)
|
||||
data = response.json()
|
||||
self.assertIsInstance(data, list)
|
||||
self.assertEqual(len(data), 0)
|
||||
|
||||
def test_create_additional_information(self):
|
||||
"""Test POST creates additional information."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# Create temperature information (assuming temperature model exists)
|
||||
payload = {"interval": {"start": 20, "end": 25}}
|
||||
response = self.client.post(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/temperature/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 200)
|
||||
data = response.json()
|
||||
self.assertEqual(data["status"], "created")
|
||||
self.assertIn("uuid", data)
|
||||
self.assertIsNotNone(data["uuid"])
|
||||
|
||||
def test_create_with_invalid_data_returns_400(self):
|
||||
"""Test POST with invalid data returns 400 with validation errors."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# Invalid data (missing required fields or wrong types)
|
||||
payload = {"invalid_field": "value"}
|
||||
response = self.client.post(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/temperature/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 400)
|
||||
data = response.json()
|
||||
# Should have validation error details in 'detail' field
|
||||
self.assertIn("detail", data)
|
||||
|
||||
def test_validation_errors_are_user_friendly(self):
|
||||
"""Test that validation errors are user-friendly and field-specific."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# Invalid data - wrong type (string instead of number in interval)
|
||||
payload = {"interval": {"start": "not_a_number", "end": 25}}
|
||||
response = self.client.post(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/temperature/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 400)
|
||||
data = response.json()
|
||||
|
||||
# Parse the error response - Django Ninja wraps errors in 'detail'
|
||||
error_str = data.get("detail") or data.get("error")
|
||||
self.assertIsNotNone(error_str, "Response should contain error details")
|
||||
|
||||
# Parse the JSON error string
|
||||
error_data = json.loads(error_str)
|
||||
|
||||
# Check structure
|
||||
self.assertEqual(error_data.get("type"), "validation_error")
|
||||
self.assertIn("field_errors", error_data)
|
||||
self.assertIn("message", error_data)
|
||||
|
||||
# Ensure error messages are user-friendly (no Pydantic URLs or technical jargon)
|
||||
error_str = json.dumps(error_data)
|
||||
self.assertNotIn("pydantic", error_str.lower())
|
||||
self.assertNotIn("https://errors.pydantic.dev", error_str)
|
||||
self.assertNotIn("loc", error_str) # No technical field like 'loc'
|
||||
|
||||
# Check that error message is helpful
|
||||
self.assertIn("Please", error_data["message"]) # User-friendly language
|
||||
|
||||
def test_patch_additional_information(self):
|
||||
"""Test PATCH updates existing additional information."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# First create an item
|
||||
create_payload = {"interval": {"start": 20, "end": 25}}
|
||||
create_response = self.client.post(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/temperature/",
|
||||
data=json.dumps(create_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
item_uuid = create_response.json()["uuid"]
|
||||
|
||||
# Then update it with PATCH
|
||||
update_payload = {"interval": {"start": 30, "end": 35}}
|
||||
patch_response = self.client.patch(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/item/{item_uuid}/",
|
||||
data=json.dumps(update_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(patch_response.status_code, 200)
|
||||
data = patch_response.json()
|
||||
self.assertEqual(data["status"], "updated")
|
||||
self.assertEqual(data["uuid"], item_uuid) # UUID preserved
|
||||
|
||||
# Verify the data was updated
|
||||
list_response = self.client.get(f"/api/v1/scenario/{self.scenario.uuid}/information/")
|
||||
items = list_response.json()
|
||||
self.assertEqual(len(items), 1)
|
||||
updated_item = items[0]
|
||||
self.assertEqual(updated_item["uuid"], item_uuid)
|
||||
self.assertEqual(updated_item["data"]["interval"]["start"], 30)
|
||||
self.assertEqual(updated_item["data"]["interval"]["end"], 35)
|
||||
|
||||
def test_patch_nonexistent_item_returns_404(self):
|
||||
"""Test PATCH on non-existent item returns 404."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
fake_uuid = str(uuid4())
|
||||
payload = {"interval": {"start": 30, "end": 35}}
|
||||
response = self.client.patch(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/item/{fake_uuid}/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 404)
|
||||
|
||||
def test_patch_with_invalid_data_returns_400(self):
|
||||
"""Test PATCH with invalid data returns 400."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# First create an item
|
||||
create_payload = {"interval": {"start": 20, "end": 25}}
|
||||
create_response = self.client.post(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/temperature/",
|
||||
data=json.dumps(create_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
item_uuid = create_response.json()["uuid"]
|
||||
|
||||
# Try to update with invalid data
|
||||
invalid_payload = {"invalid_field": "value"}
|
||||
patch_response = self.client.patch(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/item/{item_uuid}/",
|
||||
data=json.dumps(invalid_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(patch_response.status_code, 400)
|
||||
|
||||
def test_patch_validation_errors_are_user_friendly(self):
|
||||
"""Test that PATCH validation errors are user-friendly and field-specific."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# First create an item
|
||||
create_payload = {"interval": {"start": 20, "end": 25}}
|
||||
create_response = self.client.post(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/temperature/",
|
||||
data=json.dumps(create_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
item_uuid = create_response.json()["uuid"]
|
||||
|
||||
# Update with invalid data - wrong type (string instead of number in interval)
|
||||
invalid_payload = {"interval": {"start": "not_a_number", "end": 25}}
|
||||
patch_response = self.client.patch(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/item/{item_uuid}/",
|
||||
data=json.dumps(invalid_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(patch_response.status_code, 400)
|
||||
data = patch_response.json()
|
||||
|
||||
# Parse the error response - Django Ninja wraps errors in 'detail'
|
||||
error_str = data.get("detail") or data.get("error")
|
||||
self.assertIsNotNone(error_str, "Response should contain error details")
|
||||
|
||||
# Parse the JSON error string
|
||||
error_data = json.loads(error_str)
|
||||
|
||||
# Check structure
|
||||
self.assertEqual(error_data.get("type"), "validation_error")
|
||||
self.assertIn("field_errors", error_data)
|
||||
self.assertIn("message", error_data)
|
||||
|
||||
# Ensure error messages are user-friendly (no Pydantic URLs or technical jargon)
|
||||
error_str = json.dumps(error_data)
|
||||
self.assertNotIn("pydantic", error_str.lower())
|
||||
self.assertNotIn("https://errors.pydantic.dev", error_str)
|
||||
self.assertNotIn("loc", error_str) # No technical field like 'loc'
|
||||
|
||||
# Check that error message is helpful
|
||||
self.assertIn("Please", error_data["message"]) # User-friendly language
|
||||
|
||||
def test_delete_additional_information(self):
|
||||
"""Test DELETE removes additional information."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# Create an item
|
||||
create_payload = {"interval": {"start": 20, "end": 25}}
|
||||
create_response = self.client.post(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/temperature/",
|
||||
data=json.dumps(create_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
item_uuid = create_response.json()["uuid"]
|
||||
|
||||
# Delete it
|
||||
delete_response = self.client.delete(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/item/{item_uuid}/"
|
||||
)
|
||||
|
||||
self.assertEqual(delete_response.status_code, 200)
|
||||
data = delete_response.json()
|
||||
self.assertEqual(data["status"], "deleted")
|
||||
|
||||
# Verify deletion
|
||||
list_response = self.client.get(f"/api/v1/scenario/{self.scenario.uuid}/information/")
|
||||
items = list_response.json()
|
||||
self.assertEqual(len(items), 0)
|
||||
|
||||
def test_delete_nonexistent_item_returns_404(self):
|
||||
"""Test DELETE on non-existent item returns 404."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
fake_uuid = str(uuid4())
|
||||
response = self.client.delete(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/item/{fake_uuid}/"
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 404)
|
||||
|
||||
def test_multiple_items_crud(self):
|
||||
"""Test creating, updating, and deleting multiple items."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# Create first item
|
||||
item1_payload = {"interval": {"start": 20, "end": 25}}
|
||||
response1 = self.client.post(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/temperature/",
|
||||
data=json.dumps(item1_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
item1_uuid = response1.json()["uuid"]
|
||||
|
||||
# Create second item (different type if available, or same type)
|
||||
item2_payload = {"interval": {"start": 30, "end": 35}}
|
||||
response2 = self.client.post(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/temperature/",
|
||||
data=json.dumps(item2_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
item2_uuid = response2.json()["uuid"]
|
||||
|
||||
# Verify both exist
|
||||
list_response = self.client.get(f"/api/v1/scenario/{self.scenario.uuid}/information/")
|
||||
items = list_response.json()
|
||||
self.assertEqual(len(items), 2)
|
||||
|
||||
# Update first item
|
||||
update_payload = {"interval": {"start": 15, "end": 20}}
|
||||
self.client.patch(
|
||||
f"/api/v1/scenario/{self.scenario.uuid}/information/item/{item1_uuid}/",
|
||||
data=json.dumps(update_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
# Delete second item
|
||||
self.client.delete(f"/api/v1/scenario/{self.scenario.uuid}/information/item/{item2_uuid}/")
|
||||
|
||||
# Verify final state: one item with updated data
|
||||
list_response = self.client.get(f"/api/v1/scenario/{self.scenario.uuid}/information/")
|
||||
items = list_response.json()
|
||||
self.assertEqual(len(items), 1)
|
||||
self.assertEqual(items[0]["uuid"], item1_uuid)
|
||||
self.assertEqual(items[0]["data"]["interval"]["start"], 15)
|
||||
|
||||
def test_list_info_denied_without_permission(self):
|
||||
"""User cannot list info for scenario in package they don't have access to"""
|
||||
self.client.force_login(self.user)
|
||||
response = self.client.get(f"/api/v1/scenario/{self.other_scenario.uuid}/information/")
|
||||
self.assertEqual(response.status_code, 403)
|
||||
|
||||
def test_add_info_denied_without_permission(self):
|
||||
"""User cannot add info to scenario in package they don't have access to"""
|
||||
self.client.force_login(self.user)
|
||||
payload = {"interval": {"start": 25, "end": 30}}
|
||||
response = self.client.post(
|
||||
f"/api/v1/scenario/{self.other_scenario.uuid}/information/temperature/",
|
||||
json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
self.assertEqual(response.status_code, 403)
|
||||
|
||||
def test_update_info_denied_without_permission(self):
|
||||
"""User cannot update info in scenario they don't have access to"""
|
||||
self.client.force_login(self.other_user)
|
||||
# First create an item as other_user
|
||||
create_payload = {"interval": {"start": 20, "end": 25}}
|
||||
create_response = self.client.post(
|
||||
f"/api/v1/scenario/{self.other_scenario.uuid}/information/temperature/",
|
||||
data=json.dumps(create_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
item_uuid = create_response.json()["uuid"]
|
||||
|
||||
# Try to update as user (who doesn't have access)
|
||||
self.client.force_login(self.user)
|
||||
update_payload = {"interval": {"start": 30, "end": 35}}
|
||||
response = self.client.patch(
|
||||
f"/api/v1/scenario/{self.other_scenario.uuid}/information/item/{item_uuid}/",
|
||||
data=json.dumps(update_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
self.assertEqual(response.status_code, 403)
|
||||
|
||||
def test_delete_info_denied_without_permission(self):
|
||||
"""User cannot delete info from scenario they don't have access to"""
|
||||
self.client.force_login(self.other_user)
|
||||
# First create an item as other_user
|
||||
create_payload = {"interval": {"start": 20, "end": 25}}
|
||||
create_response = self.client.post(
|
||||
f"/api/v1/scenario/{self.other_scenario.uuid}/information/temperature/",
|
||||
data=json.dumps(create_payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
item_uuid = create_response.json()["uuid"]
|
||||
|
||||
# Try to delete as user (who doesn't have access)
|
||||
self.client.force_login(self.user)
|
||||
response = self.client.delete(
|
||||
f"/api/v1/scenario/{self.other_scenario.uuid}/information/item/{item_uuid}/"
|
||||
)
|
||||
self.assertEqual(response.status_code, 403)
|
||||
|
||||
def test_nonexistent_scenario_returns_404(self):
|
||||
"""Test operations on non-existent scenario return 404."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
fake_uuid = uuid4()
|
||||
response = self.client.get(f"/api/v1/scenario/{fake_uuid}/information/")
|
||||
|
||||
self.assertEqual(response.status_code, 404)
|
||||
477
epapi/tests/v1/test_api_permissions.py
Normal file
@ -0,0 +1,477 @@
|
||||
from django.test import TestCase, tag
|
||||
|
||||
from epdb.logic import GroupManager, PackageManager, UserManager
|
||||
from epdb.models import (
|
||||
Compound,
|
||||
GroupPackagePermission,
|
||||
Permission,
|
||||
UserPackagePermission,
|
||||
)
|
||||
|
||||
|
||||
@tag("api", "end2end")
|
||||
class APIPermissionTestBase(TestCase):
|
||||
"""
|
||||
Base class for API permission tests.
|
||||
|
||||
Sets up common test data:
|
||||
- user1: Owner of packages
|
||||
- user2: User with various permissions
|
||||
- user3: User with no permissions
|
||||
- reviewed_package: Public package (reviewed=True)
|
||||
- unreviewed_package_owned: Unreviewed package owned by user1
|
||||
- unreviewed_package_read: Unreviewed package with READ permission for user2
|
||||
- unreviewed_package_write: Unreviewed package with WRITE permission for user2
|
||||
- unreviewed_package_all: Unreviewed package with ALL permission for user2
|
||||
- unreviewed_package_no_access: Unreviewed package with no permissions for user2/user3
|
||||
- group_package: Unreviewed package accessible via group permission
|
||||
- test_group: Group containing user2
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def setUpTestData(cls):
|
||||
# Create users
|
||||
cls.user1 = UserManager.create_user(
|
||||
"permission-user1",
|
||||
"permission-user1@envipath.com",
|
||||
"SuperSafe",
|
||||
set_setting=False,
|
||||
add_to_group=False,
|
||||
is_active=True,
|
||||
)
|
||||
cls.user2 = UserManager.create_user(
|
||||
"permission-user2",
|
||||
"permission-user2@envipath.com",
|
||||
"SuperSafe",
|
||||
set_setting=False,
|
||||
add_to_group=False,
|
||||
is_active=True,
|
||||
)
|
||||
cls.user3 = UserManager.create_user(
|
||||
"permission-user3",
|
||||
"permission-user3@envipath.com",
|
||||
"SuperSafe",
|
||||
set_setting=False,
|
||||
add_to_group=False,
|
||||
is_active=True,
|
||||
)
|
||||
|
||||
# Delete default packages to ensure clean test data
|
||||
for user in [cls.user1, cls.user2, cls.user3]:
|
||||
default_pkg = user.default_package
|
||||
user.default_package = None
|
||||
user.save()
|
||||
if default_pkg:
|
||||
default_pkg.delete()
|
||||
|
||||
# Create reviewed package (public)
|
||||
cls.reviewed_package = PackageManager.create_package(
|
||||
cls.user1, "Reviewed Package", "Public package"
|
||||
)
|
||||
cls.reviewed_package.reviewed = True
|
||||
cls.reviewed_package.save()
|
||||
|
||||
# Create unreviewed packages with various permissions
|
||||
cls.unreviewed_package_owned = PackageManager.create_package(
|
||||
cls.user1, "User1 Owned Package", "Owned by user1"
|
||||
)
|
||||
|
||||
cls.unreviewed_package_read = PackageManager.create_package(
|
||||
cls.user1, "User2 Read Package", "User2 has READ permission"
|
||||
)
|
||||
UserPackagePermission.objects.create(
|
||||
user=cls.user2, package=cls.unreviewed_package_read, permission=Permission.READ[0]
|
||||
)
|
||||
|
||||
cls.unreviewed_package_write = PackageManager.create_package(
|
||||
cls.user1, "User2 Write Package", "User2 has WRITE permission"
|
||||
)
|
||||
UserPackagePermission.objects.create(
|
||||
user=cls.user2, package=cls.unreviewed_package_write, permission=Permission.WRITE[0]
|
||||
)
|
||||
|
||||
cls.unreviewed_package_all = PackageManager.create_package(
|
||||
cls.user1, "User2 All Package", "User2 has ALL permission"
|
||||
)
|
||||
UserPackagePermission.objects.create(
|
||||
user=cls.user2, package=cls.unreviewed_package_all, permission=Permission.ALL[0]
|
||||
)
|
||||
|
||||
cls.unreviewed_package_no_access = PackageManager.create_package(
|
||||
cls.user1, "No Access Package", "No permissions for user2/user3"
|
||||
)
|
||||
|
||||
# Create group and group package
|
||||
cls.test_group = GroupManager.create_group(
|
||||
cls.user1, "Test Group", "Group for permission testing"
|
||||
)
|
||||
cls.test_group.user_member.add(cls.user2)
|
||||
cls.test_group.save()
|
||||
|
||||
cls.group_package = PackageManager.create_package(
|
||||
cls.user1, "Group Package", "Accessible via group permission"
|
||||
)
|
||||
GroupPackagePermission.objects.create(
|
||||
group=cls.test_group, package=cls.group_package, permission=Permission.READ[0]
|
||||
)
|
||||
|
||||
# Create test compounds in each package
|
||||
cls.reviewed_compound = Compound.create(
|
||||
cls.reviewed_package, "C", "Reviewed Compound", "Test compound"
|
||||
)
|
||||
cls.owned_compound = Compound.create(
|
||||
cls.unreviewed_package_owned, "CC", "Owned Compound", "Test compound"
|
||||
)
|
||||
cls.read_compound = Compound.create(
|
||||
cls.unreviewed_package_read, "CCC", "Read Compound", "Test compound"
|
||||
)
|
||||
cls.write_compound = Compound.create(
|
||||
cls.unreviewed_package_write, "CCCC", "Write Compound", "Test compound"
|
||||
)
|
||||
cls.all_compound = Compound.create(
|
||||
cls.unreviewed_package_all, "CCCCC", "All Compound", "Test compound"
|
||||
)
|
||||
cls.no_access_compound = Compound.create(
|
||||
cls.unreviewed_package_no_access, "CCCCCC", "No Access Compound", "Test compound"
|
||||
)
|
||||
cls.group_compound = Compound.create(
|
||||
cls.group_package, "CCCCCCC", "Group Compound", "Test compound"
|
||||
)
|
||||
|
||||
|
||||
@tag("api", "end2end")
class PackageListPermissionTest(APIPermissionTestBase):
    """
    Permission tests for the /api/v1/packages/ listing endpoint.

    Special case: this endpoint allows anonymous access (auth=None), so
    visibility is filtered per user rather than gated by authentication.
    """

    ENDPOINT = "/api/v1/packages/"

    def test_anonymous_user_sees_only_reviewed_packages(self):
        """Anonymous visitors get the reviewed package and nothing else."""
        self.client.logout()
        resp = self.client.get(self.ENDPOINT)

        self.assertEqual(resp.status_code, 200)
        body = resp.json()

        # The lone reviewed package is the only visible entry.
        self.assertEqual(body["total_items"], 1)
        first = body["items"][0]
        self.assertEqual(first["uuid"], str(self.reviewed_package.uuid))
        self.assertEqual(first["review_status"], "reviewed")

    def test_authenticated_user_sees_all_readable_packages(self):
        """A logged-in user sees reviewed packages plus every package they can read."""
        self.client.force_login(self.user2)
        resp = self.client.get(self.ENDPOINT)

        self.assertEqual(resp.status_code, 200)
        body = resp.json()

        # user2's readable set: the public reviewed package, the three
        # unreviewed packages with direct READ/WRITE/ALL grants, and the
        # package shared through group membership -- five in total.
        self.assertEqual(body["total_items"], 5)

        expected = {
            str(pkg.uuid)
            for pkg in (
                self.reviewed_package,
                self.unreviewed_package_read,
                self.unreviewed_package_write,
                self.unreviewed_package_all,
                self.group_package,
            )
        }
        self.assertEqual({item["uuid"] for item in body["items"]}, expected)

    def test_owner_sees_all_owned_packages(self):
        """The creator of the packages can see every one of them."""
        self.client.force_login(self.user1)
        resp = self.client.get(self.ENDPOINT)

        self.assertEqual(resp.status_code, 200)
        # user1 created all seven packages in setUpTestData.
        self.assertEqual(resp.json()["total_items"], 7)

    def test_filter_by_review_status_true(self):
        """review_status=True narrows the listing to reviewed packages."""
        self.client.force_login(self.user2)
        resp = self.client.get(self.ENDPOINT, {"review_status": True})

        self.assertEqual(resp.status_code, 200)
        body = resp.json()

        # Only the single reviewed package matches the filter.
        self.assertEqual(body["total_items"], 1)
        for item in body["items"]:
            self.assertEqual(item["review_status"], "reviewed")

    def test_filter_by_review_status_false(self):
        """review_status=False narrows the listing to unreviewed packages."""
        self.client.force_login(self.user2)
        resp = self.client.get(self.ENDPOINT, {"review_status": False})

        self.assertEqual(resp.status_code, 200)
        body = resp.json()

        # Four unreviewed packages are readable by user2.
        self.assertEqual(body["total_items"], 4)
        for item in body["items"]:
            self.assertEqual(item["review_status"], "unreviewed")

    def test_anonymous_filter_unreviewed_returns_empty(self):
        """Filtering for unreviewed packages yields nothing for anonymous users."""
        self.client.logout()
        resp = self.client.get(self.ENDPOINT, {"review_status": False})

        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.json()["total_items"], 0)
|
||||
|
||||
|
||||
@tag("api", "end2end")
class GlobalCompoundListPermissionTest(APIPermissionTestBase):
    """
    Permission tests for the global /api/v1/compounds/ listing endpoint.

    Unlike the package listing, this endpoint requires authentication.
    """

    ENDPOINT = "/api/v1/compounds/"

    def test_anonymous_user_cannot_access(self):
        """Unauthenticated requests are rejected with 401."""
        self.client.logout()
        self.assertEqual(self.client.get(self.ENDPOINT).status_code, 401)

    def test_authenticated_user_sees_compounds_from_readable_packages(self):
        """Only compounds from packages readable by the user are listed."""
        self.client.force_login(self.user2)
        resp = self.client.get(self.ENDPOINT)

        self.assertEqual(resp.status_code, 200)
        body = resp.json()

        # One compound per readable package: reviewed + READ/WRITE/ALL + group.
        self.assertEqual(body["total_items"], 5)

        expected = {
            str(compound.uuid)
            for compound in (
                self.reviewed_compound,
                self.read_compound,
                self.write_compound,
                self.all_compound,
                self.group_compound,
            )
        }
        self.assertEqual({item["uuid"] for item in body["items"]}, expected)

    def test_user_without_permission_cannot_see_compound(self):
        """Compounds in packages the user cannot read stay hidden."""
        self.client.force_login(self.user3)
        resp = self.client.get(self.ENDPOINT)

        self.assertEqual(resp.status_code, 200)
        body = resp.json()

        # user3 only has access to the reviewed package's compound.
        self.assertEqual(body["total_items"], 1)
        self.assertEqual(body["items"][0]["uuid"], str(self.reviewed_compound.uuid))

    def test_owner_sees_all_compounds(self):
        """The owner of every package sees every compound."""
        self.client.force_login(self.user1)
        resp = self.client.get(self.ENDPOINT)

        self.assertEqual(resp.status_code, 200)
        # One compound per package, seven packages in total.
        self.assertEqual(resp.json()["total_items"], 7)
|
||||
|
||||
|
||||
@tag("api", "end2end")
class PackageScopedCompoundListPermissionTest(APIPermissionTestBase):
    """
    Permission tests for /api/v1/package/{uuid}/compound/.

    This endpoint requires authentication AND read access to the
    addressed package.
    """

    def _fetch(self, package):
        """GET the compound listing of ``package`` and return the raw response."""
        return self.client.get(f"/api/v1/package/{package.uuid}/compound/")

    def _assert_single_compound(self, response, compound):
        """Assert a 200 listing that contains exactly ``compound``."""
        self.assertEqual(response.status_code, 200)
        body = response.json()
        self.assertEqual(body["total_items"], 1)
        self.assertEqual(body["items"][0]["uuid"], str(compound.uuid))

    def test_anonymous_user_cannot_access_reviewed_package(self):
        """Even reviewed packages are closed to anonymous requests (401)."""
        self.client.logout()
        self.assertEqual(self._fetch(self.reviewed_package).status_code, 401)

    def test_authenticated_user_can_access_reviewed_package(self):
        """Any authenticated user may read a reviewed package."""
        self.client.force_login(self.user3)
        self._assert_single_compound(
            self._fetch(self.reviewed_package), self.reviewed_compound
        )

    def test_user_can_access_package_with_read_permission(self):
        """A direct READ grant opens the package-scoped listing."""
        self.client.force_login(self.user2)
        self._assert_single_compound(
            self._fetch(self.unreviewed_package_read), self.read_compound
        )

    def test_user_can_access_package_with_write_permission(self):
        """A direct WRITE grant opens the package-scoped listing."""
        self.client.force_login(self.user2)
        self._assert_single_compound(
            self._fetch(self.unreviewed_package_write), self.write_compound
        )

    def test_user_can_access_package_with_all_permission(self):
        """A direct ALL grant opens the package-scoped listing."""
        self.client.force_login(self.user2)
        self._assert_single_compound(
            self._fetch(self.unreviewed_package_all), self.all_compound
        )

    def test_user_cannot_access_package_without_permission(self):
        """Without any grant the endpoint answers 403 Forbidden."""
        self.client.force_login(self.user2)
        self.assertEqual(
            self._fetch(self.unreviewed_package_no_access).status_code, 403
        )

    def test_nonexistent_package_returns_404(self):
        """An unknown package UUID yields 404."""
        self.client.force_login(self.user2)
        url = "/api/v1/package/00000000-0000-0000-0000-000000000000/compound/"
        self.assertEqual(self.client.get(url).status_code, 404)

    def test_owner_can_access_owned_package(self):
        """Ownership implies access to the package-scoped listing."""
        self.client.force_login(self.user1)
        self._assert_single_compound(
            self._fetch(self.unreviewed_package_owned), self.owned_compound
        )

    def test_group_member_can_access_group_package(self):
        """Group membership grants access via the group's READ permission."""
        self.client.force_login(self.user2)
        self._assert_single_compound(
            self._fetch(self.group_package), self.group_compound
        )

    def test_non_group_member_cannot_access_group_package(self):
        """Users outside the group get 403 on the group-shared package."""
        self.client.force_login(self.user3)
        self.assertEqual(self._fetch(self.group_package).status_code, 403)
|
||||
|
||||
|
||||
@tag("api", "end2end")
class MultiResourcePermissionTest(APIPermissionTestBase):
    """
    Cross-resource checks that permission filtering behaves consistently.

    Samples the rules, reactions and pathways listings to confirm they
    apply the same visibility logic as the compound endpoints.
    """

    def test_rules_endpoint_respects_permissions(self):
        """A rule in an inaccessible package is absent from /api/v1/rules/."""
        from epdb.models import SimpleAmbitRule

        hidden = SimpleAmbitRule.create(
            self.unreviewed_package_no_access, "Test Rule", "Test", "[C:1]>>[C:1]O"
        )

        self.client.force_login(self.user2)
        resp = self.client.get("/api/v1/rules/")

        self.assertEqual(resp.status_code, 200)
        listed = [item["uuid"] for item in resp.json()["items"]]
        self.assertNotIn(str(hidden.uuid), listed)

    def test_reactions_endpoint_respects_permissions(self):
        """A reaction in an inaccessible package is absent from /api/v1/reactions/."""
        from epdb.models import Reaction

        hidden = Reaction.create(
            self.unreviewed_package_no_access, "Test Reaction", "Test", ["C"], ["CO"]
        )

        self.client.force_login(self.user2)
        resp = self.client.get("/api/v1/reactions/")

        self.assertEqual(resp.status_code, 200)
        listed = [item["uuid"] for item in resp.json()["items"]]
        self.assertNotIn(str(hidden.uuid), listed)

    def test_pathways_endpoint_respects_permissions(self):
        """A pathway in an inaccessible package is absent from /api/v1/pathways/."""
        from epdb.models import Pathway

        hidden = Pathway.objects.create(
            package=self.unreviewed_package_no_access, name="Test Pathway", description="Test"
        )

        self.client.force_login(self.user2)
        resp = self.client.get("/api/v1/pathways/")

        self.assertEqual(resp.status_code, 200)
        listed = [item["uuid"] for item in resp.json()["items"]]
        self.assertNotIn(str(hidden.uuid), listed)
|
||||
477
epapi/tests/v1/test_contract_get_entities.py
Normal file
@ -0,0 +1,477 @@
|
||||
from django.test import TestCase, tag
|
||||
|
||||
from epdb.logic import PackageManager, UserManager
|
||||
from epdb.models import Compound, Reaction, Pathway, EPModel, SimpleAmbitRule, Scenario
|
||||
|
||||
|
||||
class BaseTestAPIGetPaginated:
    """
    Mixin with shared pagination tests for the v1 listing endpoints.

    Subclasses must also inherit from TestCase, e.g.:
        class MyTest(BaseTestAPIGetPaginated, TestCase):
            ...

    Required subclass configuration:
      - resource_name / resource_name_plural: singular and plural names
      - global_endpoint: global listing URL (e.g. "/api/v1/compounds/")
      - package_endpoint_template: package-scoped URL template, or None
      - total_reviewed / total_unreviewed: how many items to create
      - create_reviewed_resource(cls, package, idx): factory classmethod
      - create_unreviewed_resource(cls, package, idx): factory classmethod
    """

    # Overridden by concrete subclasses.
    resource_name = None
    resource_name_plural = None
    global_endpoint = None
    package_endpoint_template = None
    total_reviewed = 50
    total_unreviewed = 20
    default_page_size = 50
    max_page_size = 100

    @classmethod
    def setUpTestData(cls):
        cls.user = UserManager.create_user(
            f"{cls.resource_name}-user",
            f"{cls.resource_name}-user@envipath.com",
            "SuperSafe",
            set_setting=False,
            add_to_group=False,
            is_active=True,
        )

        # Drop the auto-created default package so counts start from zero.
        orphan = cls.user.default_package
        cls.user.default_package = None
        cls.user.save()
        orphan.delete()

        cls.reviewed_package = PackageManager.create_package(
            cls.user, "Reviewed Package", f"Reviewed package for {cls.resource_name} tests"
        )
        cls.reviewed_package.reviewed = True
        cls.reviewed_package.save()

        cls.unreviewed_package = PackageManager.create_package(
            cls.user, "Draft Package", f"Unreviewed package for {cls.resource_name} tests"
        )

        # Populate both packages via the subclass factories.
        for i in range(cls.total_reviewed):
            cls.create_reviewed_resource(cls.reviewed_package, i)
        for i in range(cls.total_unreviewed):
            cls.create_unreviewed_resource(cls.unreviewed_package, i)

        # Pre-render the package-scoped URLs when the resource has them.
        if cls.package_endpoint_template:
            cls.reviewed_package_endpoint = cls.package_endpoint_template.format(
                uuid=cls.reviewed_package.uuid
            )
            cls.unreviewed_package_endpoint = cls.package_endpoint_template.format(
                uuid=cls.unreviewed_package.uuid
            )

    @classmethod
    def create_reviewed_resource(cls, package, idx):
        """Create one reviewed resource in ``package``; ``idx`` is 0-based."""
        raise NotImplementedError(f"{cls.__name__} must implement create_reviewed_resource()")

    @classmethod
    def create_unreviewed_resource(cls, package, idx):
        """Create one unreviewed resource in ``package``; ``idx`` is 0-based."""
        raise NotImplementedError(f"{cls.__name__} must implement create_unreviewed_resource()")

    def setUp(self):
        self.client.force_login(self.user)

    def test_requires_session_authentication(self):
        """The global listing rejects unauthenticated requests with 401."""
        self.client.logout()
        self.assertEqual(self.client.get(self.global_endpoint).status_code, 401)

    def test_global_listing_uses_default_page_size(self):
        """The first page comes back with the default page size."""
        resp = self.client.get(self.global_endpoint, {"review_status": True})
        self.assertEqual(resp.status_code, 200)

        body = resp.json()
        self.assertEqual(body["page"], 1)
        self.assertEqual(body["page_size"], self.default_page_size)
        self.assertEqual(body["total_items"], self.total_reviewed)

        # Filtered listing must contain reviewed items only.
        for item in body["items"]:
            self.assertEqual(item["review_status"], "reviewed")

    def test_can_request_later_page(self):
        """Requesting page 2 returns the remaining reviewed items."""
        if self.total_reviewed <= self.default_page_size:
            self.skipTest(
                f"Not enough items to test pagination "
                f"({self.total_reviewed} <= {self.default_page_size})"
            )

        resp = self.client.get(self.global_endpoint, {"page": 2, "review_status": True})
        self.assertEqual(resp.status_code, 200)

        body = resp.json()
        self.assertEqual(body["page"], 2)

        # Page 2 holds whatever is left after the first full page.
        leftover = self.total_reviewed - self.default_page_size
        self.assertEqual(len(body["items"]), min(self.default_page_size, leftover))

        for item in body["items"]:
            self.assertEqual(item["review_status"], "reviewed")

    def test_page_size_is_capped(self):
        """An oversized page_size request is clamped to the maximum."""
        if self.total_reviewed <= self.max_page_size:
            self.skipTest(
                f"Not enough items to test page size cap "
                f"({self.total_reviewed} <= {self.max_page_size})"
            )

        resp = self.client.get(self.global_endpoint, {"page_size": 150})
        self.assertEqual(resp.status_code, 200)

        body = resp.json()
        self.assertEqual(body["page_size"], self.max_page_size)
        self.assertEqual(len(body["items"]), self.max_page_size)

    def test_package_endpoint_for_reviewed_package(self):
        """The package-scoped listing returns every reviewed item."""
        if not self.package_endpoint_template:
            self.skipTest("No package endpoint for this resource")

        resp = self.client.get(self.reviewed_package_endpoint)
        self.assertEqual(resp.status_code, 200)

        body = resp.json()
        self.assertEqual(body["total_items"], self.total_reviewed)
        for item in body["items"]:
            self.assertEqual(item["review_status"], "reviewed")

    def test_package_endpoint_for_unreviewed_package(self):
        """The package-scoped listing returns every unreviewed item."""
        if not self.package_endpoint_template:
            self.skipTest("No package endpoint for this resource")

        resp = self.client.get(self.unreviewed_package_endpoint)
        self.assertEqual(resp.status_code, 200)

        body = resp.json()
        self.assertEqual(body["total_items"], self.total_unreviewed)
        for item in body["items"]:
            self.assertEqual(item["review_status"], "unreviewed")
|
||||
|
||||
|
||||
@tag("api", "end2end")
class PackagePaginationAPITest(TestCase):
    """Pagination and review-status filtering for the package listing."""

    ENDPOINT = "/api/v1/packages/"

    @classmethod
    def setUpTestData(cls):
        cls.user = UserManager.create_user(
            "package-user",
            "package-user@envipath.com",
            "SuperSafe",
            set_setting=False,
            add_to_group=False,
            is_active=True,
        )

        # Drop the auto-created default package so counts start from zero.
        orphan = cls.user.default_package
        cls.user.default_package = None
        cls.user.save()
        orphan.delete()

        # Reviewed packages.
        cls.total_reviewed = 25
        for i in range(cls.total_reviewed):
            pkg = PackageManager.create_package(
                cls.user, f"Reviewed Package {i:03d}", "Reviewed package for tests"
            )
            pkg.reviewed = True
            pkg.save()

        # Unreviewed packages.
        cls.total_unreviewed = 15
        for i in range(cls.total_unreviewed):
            PackageManager.create_package(
                cls.user, f"Draft Package {i:03d}", "Unreviewed package for tests"
            )

    def setUp(self):
        self.client.force_login(self.user)

    def test_anonymous_can_access_reviewed_packages(self):
        """Anonymous listing succeeds but is restricted to reviewed packages."""
        self.client.logout()
        resp = self.client.get(self.ENDPOINT)
        self.assertEqual(resp.status_code, 200)

        body = resp.json()
        self.assertEqual(body["total_items"], self.total_reviewed)
        for item in body["items"]:
            self.assertEqual(item["review_status"], "reviewed")

    def test_listing_uses_default_page_size(self):
        """Authenticated listing defaults to page 1 with 50 items per page."""
        resp = self.client.get(self.ENDPOINT)
        self.assertEqual(resp.status_code, 200)

        body = resp.json()
        self.assertEqual(body["page"], 1)
        self.assertEqual(body["page_size"], 50)
        self.assertEqual(body["total_items"], self.total_reviewed + self.total_unreviewed)

    def test_reviewed_filter_true(self):
        """review_status=True returns only reviewed packages."""
        resp = self.client.get(self.ENDPOINT, {"review_status": True})
        self.assertEqual(resp.status_code, 200)

        body = resp.json()
        self.assertEqual(body["total_items"], self.total_reviewed)
        for item in body["items"]:
            self.assertEqual(item["review_status"], "reviewed")

    def test_reviewed_filter_false(self):
        """review_status=False returns only unreviewed packages."""
        resp = self.client.get(self.ENDPOINT, {"review_status": False})
        self.assertEqual(resp.status_code, 200)

        body = resp.json()
        self.assertEqual(body["total_items"], self.total_unreviewed)
        for item in body["items"]:
            self.assertEqual(item["review_status"], "unreviewed")

    def test_reviewed_filter_false_anonymous(self):
        """Anonymous users get an empty result when asking for unreviewed packages."""
        self.client.logout()
        resp = self.client.get(self.ENDPOINT, {"review_status": False})
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.json()["total_items"], 0)
|
||||
|
||||
|
||||
@tag("api", "end2end")
class CompoundPaginationAPITest(BaseTestAPIGetPaginated, TestCase):
    """Pagination tests for compound listings."""

    resource_name = "compound"
    resource_name_plural = "compounds"
    global_endpoint = "/api/v1/compounds/"
    package_endpoint_template = "/api/v1/package/{uuid}/compound/"
    total_reviewed = 125
    total_unreviewed = 35

    @classmethod
    def create_reviewed_resource(cls, package, idx):
        """Create a reviewed compound; appended oxygens keep each SMILES unique."""
        bases = ["C", "CC", "CCC", "CCCC", "CCCCC"]
        smiles = bases[idx % len(bases)] + "O" * (idx // len(bases))
        return Compound.create(
            package,
            smiles,
            f"Reviewed Compound {idx:03d}",
            "Compound for pagination tests",
        )

    @classmethod
    def create_unreviewed_resource(cls, package, idx):
        """Create an unreviewed compound; appended nitrogens keep each SMILES unique."""
        bases = ["C", "CC", "CCC", "CCCC", "CCCCC"]
        smiles = bases[idx % len(bases)] + "N" * (idx // len(bases))
        return Compound.create(
            package,
            smiles,
            f"Draft Compound {idx:03d}",
            "Compound for pagination tests",
        )
|
||||
|
||||
|
||||
@tag("api", "end2end")
class RulePaginationAPITest(BaseTestAPIGetPaginated, TestCase):
    """Pagination tests for rule listings."""

    resource_name = "rule"
    resource_name_plural = "rules"
    global_endpoint = "/api/v1/rules/"
    package_endpoint_template = "/api/v1/package/{uuid}/rule/"
    total_reviewed = 125
    total_unreviewed = 35

    @classmethod
    def create_reviewed_resource(cls, package, idx):
        """Create a reviewed rule; the growing carbon chain makes each SMIRKS unique."""
        chain = "C" * (idx + 1)
        return SimpleAmbitRule.create(
            package,
            f"Reviewed Rule {idx:03d}",
            f"Rule {idx} for pagination tests",
            f"[{chain}:1]>>[{chain}:1]O",
        )

    @classmethod
    def create_unreviewed_resource(cls, package, idx):
        """Create an unreviewed rule; the growing carbon chain makes each SMIRKS unique."""
        chain = "C" * (idx + 1)
        return SimpleAmbitRule.create(
            package,
            f"Draft Rule {idx:03d}",
            f"Rule {idx} for pagination tests",
            f"[{chain}:1]>>[{chain}:1]N",
        )
|
||||
|
||||
|
||||
@tag("api", "end2end")
class ReactionPaginationAPITest(BaseTestAPIGetPaginated, TestCase):
    """Pagination tests for reaction listings."""

    resource_name = "reaction"
    resource_name_plural = "reactions"
    global_endpoint = "/api/v1/reactions/"
    package_endpoint_template = "/api/v1/package/{uuid}/reaction/"
    total_reviewed = 125
    total_unreviewed = 35

    @classmethod
    def create_reviewed_resource(cls, package, idx):
        """Create a reviewed reaction; chain length grows with ``idx`` for uniqueness."""
        educt = "C" * (idx + 1)
        return Reaction.create(
            package=package,
            name=f"Reviewed Reaction {idx:03d}",
            description="Reaction for pagination tests",
            educts=[educt],
            products=[educt + "O"],
        )

    @classmethod
    def create_unreviewed_resource(cls, package, idx):
        """Create an unreviewed reaction; chain length grows with ``idx`` for uniqueness."""
        educt = "C" * (idx + 1)
        return Reaction.create(
            package=package,
            name=f"Draft Reaction {idx:03d}",
            description="Reaction for pagination tests",
            educts=[educt],
            products=[educt + "N"],
        )
|
||||
|
||||
|
||||
@tag("api", "end2end")
class PathwayPaginationAPITest(BaseTestAPIGetPaginated, TestCase):
    """Pagination tests for pathway listings."""

    resource_name = "pathway"
    resource_name_plural = "pathways"
    global_endpoint = "/api/v1/pathways/"
    package_endpoint_template = "/api/v1/package/{uuid}/pathway/"
    total_reviewed = 125
    total_unreviewed = 35

    @classmethod
    def create_reviewed_resource(cls, package, idx):
        """Create one reviewed pathway."""
        return Pathway.objects.create(
            package=package,
            name=f"Reviewed Pathway {idx:03d}",
            description="Pathway for pagination tests",
        )

    @classmethod
    def create_unreviewed_resource(cls, package, idx):
        """Create one unreviewed pathway."""
        return Pathway.objects.create(
            package=package,
            name=f"Draft Pathway {idx:03d}",
            description="Pathway for pagination tests",
        )
|
||||
|
||||
|
||||
@tag("api", "end2end")
class ModelPaginationAPITest(BaseTestAPIGetPaginated, TestCase):
    """Pagination tests for model listings."""

    resource_name = "model"
    resource_name_plural = "models"
    global_endpoint = "/api/v1/models/"
    package_endpoint_template = "/api/v1/package/{uuid}/model/"
    total_reviewed = 125
    total_unreviewed = 35

    @classmethod
    def create_reviewed_resource(cls, package, idx):
        """Create one reviewed model."""
        return EPModel.objects.create(
            package=package,
            name=f"Reviewed Model {idx:03d}",
            description="Model for pagination tests",
        )

    @classmethod
    def create_unreviewed_resource(cls, package, idx):
        """Create one unreviewed model."""
        return EPModel.objects.create(
            package=package,
            name=f"Draft Model {idx:03d}",
            description="Model for pagination tests",
        )
|
||||
|
||||
|
||||
@tag("api", "end2end")
class ScenarioPaginationAPITest(BaseTestAPIGetPaginated, TestCase):
    """Pagination tests for scenario listings."""

    resource_name = "scenario"
    resource_name_plural = "scenarios"
    global_endpoint = "/api/v1/scenarios/"
    package_endpoint_template = "/api/v1/package/{uuid}/scenario/"
    total_reviewed = 125
    total_unreviewed = 35

    @classmethod
    def create_reviewed_resource(cls, package, idx):
        """Create one reviewed 'lab' scenario."""
        return Scenario.create(
            package,
            f"Reviewed Scenario {idx:03d}",
            "Scenario for pagination tests",
            "2025-01-01",
            "lab",
            [],
        )

    @classmethod
    def create_unreviewed_resource(cls, package, idx):
        """Create one unreviewed 'field' scenario."""
        return Scenario.create(
            package,
            f"Draft Scenario {idx:03d}",
            "Scenario for pagination tests",
            "2025-01-01",
            "field",
            [],
        )
|
||||
301
epapi/tests/v1/test_scenario_creation.py
Normal file
@ -0,0 +1,301 @@
|
||||
"""
|
||||
Tests for Scenario Creation Endpoint Error Handling.
|
||||
|
||||
Tests comprehensive error handling for POST /api/v1/package/{uuid}/scenario/
|
||||
including package not found, permission denied, validation errors, and database errors.
|
||||
"""
|
||||
|
||||
from django.test import TestCase, tag
|
||||
import json
|
||||
from uuid import uuid4
|
||||
|
||||
from epdb.logic import PackageManager, UserManager
|
||||
from epdb.models import Scenario
|
||||
|
||||
|
||||
@tag("api", "scenario_creation")
|
||||
class ScenarioCreationAPITests(TestCase):
|
||||
"""Test scenario creation endpoint error handling."""
|
||||
|
||||
@classmethod
|
||||
def setUpTestData(cls):
|
||||
"""Set up test data: users and packages."""
|
||||
cls.user = UserManager.create_user(
|
||||
"scenario-test-user",
|
||||
"scenario-test@envipath.com",
|
||||
"SuperSafe",
|
||||
set_setting=False,
|
||||
add_to_group=False,
|
||||
is_active=True,
|
||||
)
|
||||
cls.other_user = UserManager.create_user(
|
||||
"other-user",
|
||||
"other@envipath.com",
|
||||
"SuperSafe",
|
||||
set_setting=False,
|
||||
add_to_group=False,
|
||||
is_active=True,
|
||||
)
|
||||
cls.package = PackageManager.create_package(
|
||||
cls.user, "Test Package", "Test package for scenario creation"
|
||||
)
|
||||
|
||||
def test_create_scenario_package_not_found(self):
|
||||
"""Test that non-existent package UUID returns 404."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
fake_uuid = uuid4()
|
||||
payload = {
|
||||
"name": "Test Scenario",
|
||||
"description": "Test description",
|
||||
"scenario_date": "2024-01-01",
|
||||
"scenario_type": "biodegradation",
|
||||
"additional_information": [],
|
||||
}
|
||||
|
||||
response = self.client.post(
|
||||
f"/api/v1/package/{fake_uuid}/scenario/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 404)
|
||||
self.assertIn("Package not found", response.json()["detail"])
|
||||
|
||||
def test_create_scenario_insufficient_permissions(self):
|
||||
"""Test that unauthorized access returns 403."""
|
||||
self.client.force_login(self.other_user)
|
||||
|
||||
payload = {
|
||||
"name": "Test Scenario",
|
||||
"description": "Test description",
|
||||
"scenario_date": "2024-01-01",
|
||||
"scenario_type": "biodegradation",
|
||||
"additional_information": [],
|
||||
}
|
||||
|
||||
response = self.client.post(
|
||||
f"/api/v1/package/{self.package.uuid}/scenario/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 403)
|
||||
self.assertIn("permission", response.json()["detail"].lower())
|
||||
|
||||
def test_create_scenario_invalid_ai_type(self):
|
||||
"""Test that unknown additional information type returns 400."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
payload = {
|
||||
"name": "Test Scenario",
|
||||
"description": "Test description",
|
||||
"scenario_date": "2024-01-01",
|
||||
"scenario_type": "biodegradation",
|
||||
"additional_information": [
|
||||
{"type": "invalid_type_that_does_not_exist", "data": {"some_field": "some_value"}}
|
||||
],
|
||||
}
|
||||
|
||||
response = self.client.post(
|
||||
f"/api/v1/package/{self.package.uuid}/scenario/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 400)
|
||||
response_data = response.json()
|
||||
self.assertIn("Validation errors", response_data["detail"])
|
||||
|
||||
def test_create_scenario_validation_error(self):
|
||||
"""Test that invalid additional information data returns 400."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# Use malformed data structure for an actual AI type
|
||||
payload = {
|
||||
"name": "Test Scenario",
|
||||
"description": "Test description",
|
||||
"scenario_date": "2024-01-01",
|
||||
"scenario_type": "biodegradation",
|
||||
"additional_information": [
|
||||
{
|
||||
"type": "invalid_type_name",
|
||||
"data": None, # This should cause a validation error
|
||||
}
|
||||
],
|
||||
}
|
||||
|
||||
response = self.client.post(
|
||||
f"/api/v1/package/{self.package.uuid}/scenario/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
# Should return 422 for validation errors
|
||||
self.assertEqual(response.status_code, 422)
|
||||
|
||||
def test_create_scenario_success(self):
|
||||
"""Test that valid scenario creation returns 200."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
payload = {
|
||||
"name": "Test Scenario",
|
||||
"description": "Test description",
|
||||
"scenario_date": "2024-01-01",
|
||||
"scenario_type": "biodegradation",
|
||||
"additional_information": [],
|
||||
}
|
||||
|
||||
response = self.client.post(
|
||||
f"/api/v1/package/{self.package.uuid}/scenario/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 200)
|
||||
data = response.json()
|
||||
self.assertEqual(data["name"], "Test Scenario")
|
||||
self.assertEqual(data["description"], "Test description")
|
||||
|
||||
# Verify scenario was actually created
|
||||
scenario = Scenario.objects.get(name="Test Scenario")
|
||||
self.assertEqual(scenario.package, self.package)
|
||||
self.assertEqual(scenario.scenario_type, "biodegradation")
|
||||
|
||||
def test_create_scenario_auto_name(self):
|
||||
"""Test that empty name triggers auto-generation."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
payload = {
|
||||
"name": "", # Empty name should be auto-generated
|
||||
"description": "Test description",
|
||||
"scenario_date": "2024-01-01",
|
||||
"scenario_type": "biodegradation",
|
||||
"additional_information": [],
|
||||
}
|
||||
|
||||
response = self.client.post(
|
||||
f"/api/v1/package/{self.package.uuid}/scenario/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 200)
|
||||
data = response.json()
|
||||
# Auto-generated name should follow pattern "Scenario N"
|
||||
self.assertTrue(data["name"].startswith("Scenario "))
|
||||
|
||||
def test_create_scenario_xss_protection(self):
|
||||
"""Test that XSS attempts are sanitized."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
payload = {
|
||||
"name": "<script>alert('xss')</script>Clean Name",
|
||||
"description": "<img src=x onerror=alert('xss')>Description",
|
||||
"scenario_date": "2024-01-01",
|
||||
"scenario_type": "biodegradation",
|
||||
"additional_information": [],
|
||||
}
|
||||
|
||||
response = self.client.post(
|
||||
f"/api/v1/package/{self.package.uuid}/scenario/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 200)
|
||||
data = response.json()
|
||||
# XSS should be cleaned out
|
||||
self.assertNotIn("<script>", data["name"])
|
||||
self.assertNotIn("onerror", data["description"])
|
||||
|
||||
def test_create_scenario_missing_required_field(self):
|
||||
"""Test that missing required fields returns validation error."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# Missing 'name' field entirely
|
||||
payload = {
|
||||
"description": "Test description",
|
||||
"scenario_date": "2024-01-01",
|
||||
"scenario_type": "biodegradation",
|
||||
"additional_information": [],
|
||||
}
|
||||
|
||||
response = self.client.post(
|
||||
f"/api/v1/package/{self.package.uuid}/scenario/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
# Should return 422 for schema validation errors
|
||||
self.assertEqual(response.status_code, 422)
|
||||
|
||||
def test_create_scenario_type_error_in_ai(self):
|
||||
"""Test that TypeError in AI instantiation returns 400."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
payload = {
|
||||
"name": "Test Scenario",
|
||||
"description": "Test description",
|
||||
"scenario_date": "2024-01-01",
|
||||
"scenario_type": "biodegradation",
|
||||
"additional_information": [
|
||||
{
|
||||
"type": "invalid_type_name",
|
||||
"data": "string instead of dict", # Wrong type
|
||||
}
|
||||
],
|
||||
}
|
||||
|
||||
response = self.client.post(
|
||||
f"/api/v1/package/{self.package.uuid}/scenario/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
# Should return 422 for validation errors
|
||||
self.assertEqual(response.status_code, 422)
|
||||
|
||||
def test_create_scenario_default_values(self):
|
||||
"""Test that default values are applied correctly."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
# Minimal payload with only name
|
||||
payload = {"name": "Minimal Scenario"}
|
||||
|
||||
response = self.client.post(
|
||||
f"/api/v1/package/{self.package.uuid}/scenario/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 200)
|
||||
data = response.json()
|
||||
self.assertEqual(data["name"], "Minimal Scenario")
|
||||
# Check defaults are applied
|
||||
scenario = Scenario.objects.get(name="Minimal Scenario")
|
||||
# Default description from model is "no description"
|
||||
self.assertIn(scenario.description.lower(), ["", "no description"])
|
||||
|
||||
def test_create_scenario_unicode_characters(self):
|
||||
"""Test that unicode characters are handled properly."""
|
||||
self.client.force_login(self.user)
|
||||
|
||||
payload = {
|
||||
"name": "Test Scenario 测试 🧪",
|
||||
"description": "Description with émojis and spëcial çhars",
|
||||
"scenario_date": "2024-01-01",
|
||||
"scenario_type": "biodegradation",
|
||||
"additional_information": [],
|
||||
}
|
||||
|
||||
response = self.client.post(
|
||||
f"/api/v1/package/{self.package.uuid}/scenario/",
|
||||
data=json.dumps(payload),
|
||||
content_type="application/json",
|
||||
)
|
||||
|
||||
self.assertEqual(response.status_code, 200)
|
||||
data = response.json()
|
||||
self.assertIn("测试", data["name"])
|
||||
self.assertIn("émojis", data["description"])
|
||||
114
epapi/tests/v1/test_schema_generation.py
Normal file
@ -0,0 +1,114 @@
|
||||
"""
|
||||
Property-based tests for schema generation.
|
||||
|
||||
Tests that verify schema generation works correctly for all models,
|
||||
regardless of their structure.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from typing import Type
|
||||
from pydantic import BaseModel
|
||||
|
||||
from envipy_additional_information import registry, EnviPyModel
|
||||
from epapi.utils.schema_transformers import build_rjsf_output
|
||||
|
||||
|
||||
class TestSchemaGeneration:
    """Test that all models can generate valid RJSF schemas.

    Property-based: every model registered in the additional-information
    registry is run through build_rjsf_output and its output structure is
    checked, independent of the model's concrete fields.
    """

    @pytest.mark.parametrize("model_name,model_cls", list(registry.list_models().items()))
    def test_all_models_generate_rjsf(self, model_name: str, model_cls: Type[BaseModel]):
        """Every model in the registry should generate valid RJSF format."""
        # Skip non-EnviPyModel classes (parsers, etc.)
        if not issubclass(model_cls, EnviPyModel):
            pytest.skip(f"{model_name} is not an EnviPyModel")

        # Should not raise exception
        result = build_rjsf_output(model_cls)

        # Verify structure: the four top-level RJSF keys must be present.
        assert isinstance(result, dict), f"{model_name}: Result should be a dict"
        assert "schema" in result, f"{model_name}: Missing 'schema' key"
        assert "uiSchema" in result, f"{model_name}: Missing 'uiSchema' key"
        assert "formData" in result, f"{model_name}: Missing 'formData' key"
        assert "groups" in result, f"{model_name}: Missing 'groups' key"

        # Verify types
        assert isinstance(result["schema"], dict), f"{model_name}: schema should be dict"
        assert isinstance(result["uiSchema"], dict), f"{model_name}: uiSchema should be dict"
        assert isinstance(result["formData"], dict), f"{model_name}: formData should be dict"
        assert isinstance(result["groups"], list), f"{model_name}: groups should be list"

        # Verify schema has properties
        assert "properties" in result["schema"], f"{model_name}: schema should have 'properties'"
        assert isinstance(result["schema"]["properties"], dict), (
            f"{model_name}: properties should be dict"
        )

    @pytest.mark.parametrize("model_name,model_cls", list(registry.list_models().items()))
    def test_ui_schema_matches_schema_fields(self, model_name: str, model_cls: Type[BaseModel]):
        """uiSchema keys should match schema properties (or be nested for intervals)."""
        if not issubclass(model_cls, EnviPyModel):
            pytest.skip(f"{model_name} is not an EnviPyModel")

        result = build_rjsf_output(model_cls)
        schema_props = set(result["schema"]["properties"].keys())
        ui_schema_keys = set(result["uiSchema"].keys())

        # uiSchema should have entries for all top-level properties
        # (intervals may have nested start/end, but the main field should be present)
        assert ui_schema_keys.issubset(schema_props), (
            f"{model_name}: uiSchema has keys not in schema: {ui_schema_keys - schema_props}"
        )

    @pytest.mark.parametrize("model_name,model_cls", list(registry.list_models().items()))
    def test_groups_is_list_of_strings(self, model_name: str, model_cls: Type[BaseModel]):
        """Groups should be a list of strings."""
        if not issubclass(model_cls, EnviPyModel):
            pytest.skip(f"{model_name} is not an EnviPyModel")

        result = build_rjsf_output(model_cls)
        groups = result["groups"]

        # Every model must belong to at least one string-named group.
        assert isinstance(groups, list), f"{model_name}: groups should be list"
        assert all(isinstance(g, str) for g in groups), (
            f"{model_name}: all groups should be strings, got {groups}"
        )
        assert len(groups) > 0, f"{model_name}: should have at least one group"

    @pytest.mark.parametrize("model_name,model_cls", list(registry.list_models().items()))
    def test_form_data_matches_schema(self, model_name: str, model_cls: Type[BaseModel]):
        """formData keys should match schema properties."""
        if not issubclass(model_cls, EnviPyModel):
            pytest.skip(f"{model_name} is not an EnviPyModel")

        result = build_rjsf_output(model_cls)
        schema_props = set(result["schema"]["properties"].keys())
        form_data_keys = set(result["formData"].keys())

        # formData should only contain keys that are in schema
        assert form_data_keys.issubset(schema_props), (
            f"{model_name}: formData has keys not in schema: {form_data_keys - schema_props}"
        )
|
||||
|
||||
class TestWidgetTypes:
    """Test that widget types are valid."""

    @pytest.mark.parametrize("model_name,model_cls", list(registry.list_models().items()))
    def test_widget_types_are_valid(self, model_name: str, model_cls: Type[BaseModel]):
        """All widget types in uiSchema should be valid WidgetType values."""
        # Imported locally so collection does not require ui_config at module load.
        from envipy_additional_information.ui_config import WidgetType

        if not issubclass(model_cls, EnviPyModel):
            pytest.skip(f"{model_name} is not an EnviPyModel")

        result = build_rjsf_output(model_cls)
        valid_widgets = {wt.value for wt in WidgetType}

        # A field without a "ui:widget" entry is fine; only set widgets are checked.
        for field_name, ui_config in result["uiSchema"].items():
            widget = ui_config.get("ui:widget")
            if widget:
                assert widget in valid_widgets, (
                    f"{model_name}.{field_name}: Invalid widget '{widget}'. Valid: {valid_widgets}"
                )
94
epapi/tests/v1/test_token_auth.py
Normal file
@ -0,0 +1,94 @@
|
||||
from datetime import timedelta
|
||||
|
||||
from django.test import TestCase, tag
|
||||
from django.utils import timezone
|
||||
|
||||
from epdb.logic import PackageManager, UserManager
|
||||
from epdb.models import APIToken
|
||||
|
||||
|
||||
@tag("api", "auth")
|
||||
class BearerTokenAuthTests(TestCase):
|
||||
@classmethod
|
||||
def setUpTestData(cls):
|
||||
cls.user = UserManager.create_user(
|
||||
"token-user",
|
||||
"token-user@envipath.com",
|
||||
"SuperSafe",
|
||||
set_setting=False,
|
||||
add_to_group=False,
|
||||
is_active=True,
|
||||
)
|
||||
|
||||
default_pkg = cls.user.default_package
|
||||
cls.user.default_package = None
|
||||
cls.user.save()
|
||||
if default_pkg:
|
||||
default_pkg.delete()
|
||||
|
||||
cls.unreviewed_package = PackageManager.create_package(
|
||||
cls.user, "Token Auth Package", "Package for token auth tests"
|
||||
)
|
||||
|
||||
def _auth_header(self, raw_token):
|
||||
return {"HTTP_AUTHORIZATION": f"Bearer {raw_token}"}
|
||||
|
||||
def test_valid_token_allows_access(self):
|
||||
_, raw_token = APIToken.create_token(self.user, name="Valid Token", expires_days=1)
|
||||
|
||||
response = self.client.get("/api/v1/compounds/", **self._auth_header(raw_token))
|
||||
|
||||
self.assertEqual(response.status_code, 200)
|
||||
|
||||
def test_expired_token_rejected(self):
|
||||
token, raw_token = APIToken.create_token(self.user, name="Expired Token", expires_days=1)
|
||||
token.expires_at = timezone.now() - timedelta(days=1)
|
||||
token.save(update_fields=["expires_at"])
|
||||
|
||||
response = self.client.get("/api/v1/compounds/", **self._auth_header(raw_token))
|
||||
|
||||
self.assertEqual(response.status_code, 401)
|
||||
|
||||
def test_inactive_token_rejected(self):
|
||||
token, raw_token = APIToken.create_token(self.user, name="Inactive Token", expires_days=1)
|
||||
token.is_active = False
|
||||
token.save(update_fields=["is_active"])
|
||||
|
||||
response = self.client.get("/api/v1/compounds/", **self._auth_header(raw_token))
|
||||
|
||||
self.assertEqual(response.status_code, 401)
|
||||
|
||||
def test_invalid_token_rejected(self):
|
||||
response = self.client.get("/api/v1/compounds/", HTTP_AUTHORIZATION="Bearer invalid-token")
|
||||
|
||||
self.assertEqual(response.status_code, 401)
|
||||
|
||||
def test_no_token_rejected(self):
|
||||
self.client.logout()
|
||||
response = self.client.get("/api/v1/compounds/")
|
||||
|
||||
self.assertEqual(response.status_code, 401)
|
||||
|
||||
def test_bearer_populates_request_user_for_packages(self):
|
||||
response = self.client.get("/api/v1/packages/")
|
||||
self.assertEqual(response.status_code, 200)
|
||||
payload = response.json()
|
||||
uuids = {item["uuid"] for item in payload["items"]}
|
||||
self.assertNotIn(str(self.unreviewed_package.uuid), uuids)
|
||||
|
||||
_, raw_token = APIToken.create_token(self.user, name="Package Token", expires_days=1)
|
||||
response = self.client.get("/api/v1/packages/", **self._auth_header(raw_token))
|
||||
|
||||
self.assertEqual(response.status_code, 200)
|
||||
payload = response.json()
|
||||
uuids = {item["uuid"] for item in payload["items"]}
|
||||
self.assertIn(str(self.unreviewed_package.uuid), uuids)
|
||||
|
||||
def test_session_auth_still_works_without_bearer(self):
|
||||
self.client.force_login(self.user)
|
||||
response = self.client.get("/api/v1/packages/")
|
||||
|
||||
self.assertEqual(response.status_code, 200)
|
||||
payload = response.json()
|
||||
uuids = {item["uuid"] for item in payload["items"]}
|
||||
self.assertIn(str(self.unreviewed_package.uuid), uuids)
|
||||
0
epapi/utils/__init__.py
Normal file
181
epapi/utils/schema_transformers.py
Normal file
@ -0,0 +1,181 @@
|
||||
"""
|
||||
Schema transformation utilities for converting Pydantic models to RJSF format.
|
||||
|
||||
This module provides functions to extract UI configuration from Pydantic models
|
||||
and transform them into React JSON Schema Form (RJSF) compatible format.
|
||||
"""
|
||||
|
||||
from typing import Type, Optional, Any
|
||||
|
||||
import jsonref
|
||||
from pydantic import BaseModel
|
||||
|
||||
from envipy_additional_information.ui_config import UIConfig
|
||||
from envipy_additional_information import registry
|
||||
|
||||
|
||||
def extract_groups(model_cls: Type[BaseModel]) -> list[str]:
    """
    Extract groups from registry-stored group information.

    Thin delegation to the additional-information registry; the registry is
    the single source of truth for model-group membership.

    Args:
        model_cls: The model class

    Returns:
        List of group names the model belongs to
    """
    return registry.get_groups(model_cls)
|
||||
|
||||
def extract_ui_metadata(model_cls: Type[BaseModel]) -> dict[str, Any]:
    """
    Extract model-level UI metadata from the model's ``UI`` class.

    Collects every public ``UI`` attribute that is neither a plain callable
    nor a field-level ``UIConfig`` instance. Common metadata includes:
    unit, description, title.

    Args:
        model_cls: The model class whose ``UI`` class is inspected.

    Returns:
        Mapping of attribute name to metadata value; empty if there is no
        ``UI`` class.
    """
    if not hasattr(model_cls, "UI"):
        return {}

    ui_class = model_cls.UI
    metadata: dict[str, Any] = {}

    for name in dir(ui_class):
        # Private/dunder names are never metadata.
        if name.startswith("_"):
            continue

        # dir() may list names that fail on access (e.g. raising descriptors).
        try:
            value = getattr(ui_class, name)
        except AttributeError:
            continue

        # Plain callables are helpers, not metadata; classes are kept.
        if callable(value) and not isinstance(value, type):
            continue

        # Field-level configs (UIConfig and subclasses like IntervalConfig)
        # are handled by extract_ui_config_from_model, not here.
        if isinstance(value, UIConfig):
            continue

        metadata[name] = value

    return metadata
|
||||
|
||||
def extract_ui_config_from_model(model_cls: Type[BaseModel]) -> dict[str, Any]:
    """
    Extract per-field UI configuration from the model's ``UI`` class.

    Walks the field names from the model's JSON schema and, for each one that
    has a ``UIConfig`` attribute on ``UI``, asks that config to render itself
    via ``to_ui_schema_field()``. Config classes own their own transformation
    logic; no special cases here.

    Returns:
        Mapping of field name to RJSF field config; fields without a UI
        config (hidden from the UI) are omitted.
    """
    if not hasattr(model_cls, "UI"):
        return {}

    ui_class = model_cls.UI
    properties = model_cls.model_json_schema().get("properties", {})

    configs: dict[str, Any] = {}
    for field_name in properties.keys():
        # Missing attribute => field intentionally has no UI config.
        candidate = getattr(ui_class, field_name, None)
        if isinstance(candidate, UIConfig):
            configs[field_name] = candidate.to_ui_schema_field()

    return configs
|
||||
|
||||
def build_ui_schema(model_cls: Type[BaseModel]) -> dict:
    """Generate the RJSF uiSchema for ``model_cls`` from its UI class.

    The per-field configs already know how to render themselves; this simply
    returns a fresh mapping of field name -> RJSF field config.
    """
    # Copy so callers can mutate the result without touching shared state.
    return dict(extract_ui_config_from_model(model_cls))
|
||||
|
||||
def build_schema(model_cls: Type[BaseModel]) -> dict[str, Any]:
    """
    Build JSON schema from Pydantic model, applying UI metadata.

    Dereferences all $ref pointers to produce fully inlined schema.
    This ensures the frontend receives schemas with enum values and nested
    properties fully resolved, without needing client-side ref resolution.

    Extracts model-level metadata from UI class (title, unit, etc.) and applies
    it to the generated schema. This ensures UI metadata is the single source of truth.

    Args:
        model_cls: The Pydantic model to generate a schema for.

    Returns:
        The dereferenced JSON schema dict, with UI metadata attached as
        ``x-``-prefixed properties and ``title`` set from UI metadata when
        available.
    """
    schema = model_cls.model_json_schema()

    # Dereference $ref pointers (inlines $defs) using jsonref
    # This ensures the frontend receives schemas with enum values and nested
    # properties fully resolved, currently necessary for client-side rendering.
    # FIXME: This is a hack to get the schema to work with alpine schema-form.js replace once we migrate to client-side framework.
    # proxies=False yields plain dicts/lists instead of lazy JsonRef proxies.
    schema = jsonref.replace_refs(schema, proxies=False)

    # Remove $defs section as all refs are now inlined
    if "$defs" in schema:
        del schema["$defs"]

    # Extract and apply UI metadata (title, unit, description, etc.)
    ui_metadata = extract_ui_metadata(model_cls)

    # Apply all metadata consistently as custom properties with x- prefix
    # This ensures consistency and avoids conflicts with standard JSON Schema properties
    for key, value in ui_metadata.items():
        if value is not None:
            schema[f"x-{key}"] = value

    # Set standard title property from UI metadata for JSON Schema compliance
    # (title wins over label when both are present).
    if "title" in ui_metadata:
        schema["title"] = ui_metadata["title"]
    elif "label" in ui_metadata:
        schema["title"] = ui_metadata["label"]

    return schema
|
||||
|
||||
def build_rjsf_output(model_cls: Type[BaseModel], initial_data: Optional[dict] = None) -> dict:
    """
    Main entry point returning the complete RJSF payload for a model.

    Trusts the config classes to handle their own transformation logic.
    No special-case handling - if a config knows how to transform itself, it will.

    Args:
        model_cls: The Pydantic model to render.
        initial_data: Optional pre-populated form values.

    Returns:
        dict with keys: schema, uiSchema, formData, groups
    """
    # Schema first, then uiSchema, matching the order consumers read them.
    schema = build_schema(model_cls)
    ui_schema = build_ui_schema(model_cls)

    return {
        # JSON schema with UI metadata applied (x-* properties, title).
        "schema": schema,
        # Field-level RJSF configs, produced by the UIConfig objects themselves.
        "uiSchema": ui_schema,
        # Provided initial values, or an empty form.
        "formData": {} if initial_data is None else initial_data,
        # Group names from the registry's marker information.
        "groups": extract_groups(model_cls),
    }
82
epapi/utils/validation_errors.py
Normal file
@ -0,0 +1,82 @@
|
||||
"""Shared utilities for handling Pydantic validation errors."""
|
||||
|
||||
import json
|
||||
from pydantic import ValidationError
|
||||
from pydantic_core import ErrorDetails
|
||||
from ninja.errors import HttpError
|
||||
|
||||
|
||||
def format_validation_error(error: ErrorDetails) -> str:
    """Format a Pydantic validation error into a user-friendly message.

    Args:
        error: A Pydantic error details dictionary containing 'msg', 'type', 'ctx', etc.

    Returns:
        A user-friendly error message string.
    """
    msg = error.get("msg") or "Invalid value"
    error_type = error.get("type") or ""

    # Fixed replacement messages for straightforward type failures.
    fixed_messages = {
        "missing": "This field is required",
        "string_type": "Please enter a valid string",
        "int_type": "Please enter a valid int",
        "int_parsing": "Please enter a valid int",
        "float_type": "Please enter a valid float",
        "float_parsing": "Please enter a valid float",
    }
    if error_type in fixed_messages:
        return fixed_messages[error_type]

    if error_type == "enum":
        # Mention the allowed options when Pydantic provides them in ctx.
        ctx = error.get("ctx", {})
        expected = ctx.get("expected", "") if ctx else ""
        suffix = f": {expected}" if expected else ""
        return f"Please select a valid option{suffix}"

    if error_type == "literal_error":
        # Literal errors (like Literal["active", "inactive"])
        return msg.replace("Input should be ", "Please enter ")

    if error_type == "value_error":
        # Strip the "Value error, " prefix from custom validator messages.
        return msg.replace("Value error, ", "")

    # Default: use Pydantic's message but soften its phrasing.
    return msg.replace("Input should be ", "Please enter ").replace("Value error, ", "")
|
||||
|
||||
def handle_validation_error(e: ValidationError) -> None:
    """Convert a Pydantic ValidationError into a structured HttpError.

    This function transforms Pydantic validation errors into a JSON structure
    that the frontend expects for displaying field-level errors.

    Args:
        e: The Pydantic ValidationError to handle.

    Raises:
        HttpError: Always raises a 400 error with structured JSON containing
            type, field_errors, and message fields.
    """
    field_errors: dict[str, list[str]] = {}
    for detail in e.errors():
        # The last element of the location tuple is the field name;
        # errors without a location are attributed to "root".
        loc = detail.get("loc", ())
        field_name = str(loc[-1]) if loc else "root"
        field_errors.setdefault(field_name, []).append(format_validation_error(detail))

    # Structured payload the frontend parses for per-field display.
    error_response = {
        "type": "validation_error",
        "field_errors": field_errors,
        "message": "Please correct the errors below",
    }
    raise HttpError(400, json.dumps(error_response))
0
epapi/v1/__init__.py
Normal file
34
epapi/v1/auth.py
Normal file
@ -0,0 +1,34 @@
|
||||
import hashlib
|
||||
|
||||
from ninja.security import HttpBearer
|
||||
from ninja.errors import HttpError
|
||||
|
||||
from epdb.models import APIToken
|
||||
|
||||
|
||||
class BearerTokenAuth(HttpBearer):
    """ninja HttpBearer auth backend backed by hashed APIToken records."""

    def authenticate(self, request, token):
        # No Authorization header / empty token: let ninja treat it as
        # unauthenticated rather than raising.
        if token is None:
            return None

        # Only the SHA-256 digest of the raw token is stored server-side.
        hashed_token = hashlib.sha256(token.encode()).hexdigest()
        user = APIToken.authenticate(hashed_token, hashed=True)
        if not user:
            # A token was presented but did not match an active, unexpired
            # record — reject explicitly with 401.
            raise HttpError(401, "Invalid or expired token")

        # Populate request.user so downstream permission checks see the
        # token's owner, mirroring session auth.
        request.user = user
        return user
|
||||
|
||||
class OptionalBearerTokenAuth:
    """Bearer auth that allows unauthenticated access.

    Validates the Bearer token if present (401 on invalid token),
    otherwise lets the request through for anonymous/session access.
    """

    def __init__(self):
        # Delegate token validation to the strict Bearer backend.
        self._bearer = BearerTokenAuth()

    def __call__(self, request):
        # BearerTokenAuth returns None when no token header is present, in
        # which case we fall back to whatever request.user already is
        # (session user or anonymous). An invalid token still raises 401.
        return self._bearer(request) or request.user
119
epapi/v1/dal.py
Normal file
@ -0,0 +1,119 @@
|
||||
from django.db.models import Model
|
||||
from epdb.logic import PackageManager
|
||||
from epdb.models import CompoundStructure, User, Package, Compound, Scenario
|
||||
from uuid import UUID
|
||||
|
||||
from .errors import EPAPINotFoundError, EPAPIPermissionDeniedError
|
||||
|
||||
|
||||
def get_compound_for_read(user, compound_uuid: UUID):
    """
    Get compound by UUID with permission check.

    Raises:
        EPAPINotFoundError: if no compound with that UUID exists.
        EPAPIPermissionDeniedError: if the user is missing, anonymous, or
            cannot read the compound's package.
    """
    try:
        compound = Compound.objects.get(uuid=compound_uuid)
        package = compound.package
    except Compound.DoesNotExist:
        raise EPAPINotFoundError(f"Compound with UUID {compound_uuid} not found")

    # FIXME: optimize package manager to exclusively work with UUIDs
    if not user or user.is_anonymous or not PackageManager.readable(user, package):
        raise EPAPIPermissionDeniedError("Insufficient permissions to access this compound.")

    return compound
|
||||
|
||||
def get_package_for_read(user, package_uuid: UUID):
    """
    Get package by UUID with permission check.

    Raises:
        EPAPINotFoundError: if no package with that UUID exists.
        EPAPIPermissionDeniedError: if the user is missing, anonymous, or
            cannot read the package.
    """

    # FIXME: update package manager with custom exceptions to avoid manual checks here
    try:
        package = Package.objects.get(uuid=package_uuid)
    except Package.DoesNotExist:
        raise EPAPINotFoundError(f"Package with UUID {package_uuid} not found")

    # FIXME: optimize package manager to exclusively work with UUIDs
    if not user or user.is_anonymous or not PackageManager.readable(user, package):
        raise EPAPIPermissionDeniedError("Insufficient permissions to access this package.")

    return package
|
||||
|
||||
def get_scenario_for_read(user, scenario_uuid: UUID):
    """Get scenario by UUID with read permission check.

    Raises:
        EPAPINotFoundError: if no scenario with that UUID exists.
        EPAPIPermissionDeniedError: if the user cannot read the
            scenario's package.
    """
    try:
        # select_related avoids a second query for the package permission check.
        scenario = Scenario.objects.select_related("package").get(uuid=scenario_uuid)
    except Scenario.DoesNotExist:
        raise EPAPINotFoundError(f"Scenario with UUID {scenario_uuid} not found")

    if not user or user.is_anonymous or not PackageManager.readable(user, scenario.package):
        raise EPAPIPermissionDeniedError("Insufficient permissions to access this scenario.")

    return scenario
|
||||
|
||||
def get_scenario_for_write(user, scenario_uuid: UUID):
|
||||
"""Get scenario by UUID with write permission check."""
|
||||
try:
|
||||
scenario = Scenario.objects.select_related("package").get(uuid=scenario_uuid)
|
||||
except Scenario.DoesNotExist:
|
||||
raise EPAPINotFoundError(f"Scenario with UUID {scenario_uuid} not found")
|
||||
|
||||
if not user or user.is_anonymous or not PackageManager.writable(user, scenario.package):
|
||||
raise EPAPIPermissionDeniedError("Insufficient permissions to modify this scenario.")
|
||||
|
||||
return scenario
|
||||
|
||||
|
||||
def get_user_packages_for_read(user: User | None):
|
||||
"""Get all packages readable by the user."""
|
||||
if not user or user.is_anonymous:
|
||||
return PackageManager.get_reviewed_packages()
|
||||
return PackageManager.get_all_readable_packages(user, include_reviewed=True)
|
||||
|
||||
|
||||
def get_user_entities_for_read(model_class: Model, user: User | None):
|
||||
"""Build queryset for reviewed package entities."""
|
||||
|
||||
if not user or user.is_anonymous:
|
||||
return model_class.objects.filter(package__reviewed=True).select_related("package")
|
||||
|
||||
qs = model_class.objects.filter(
|
||||
package__in=PackageManager.get_all_readable_packages(user, include_reviewed=True)
|
||||
).select_related("package")
|
||||
return qs
|
||||
|
||||
|
||||
def get_package_entities_for_read(model_class: Model, package_uuid: UUID, user: User | None = None):
|
||||
"""Build queryset for specific package entities."""
|
||||
package = get_package_for_read(user, package_uuid)
|
||||
qs = model_class.objects.filter(package=package).select_related("package")
|
||||
return qs
|
||||
|
||||
|
||||
def get_user_structure_for_read(user: User | None):
|
||||
"""Build queryset for structures accessible to the user (via compound->package)."""
|
||||
|
||||
if not user or user.is_anonymous:
|
||||
return CompoundStructure.objects.filter(compound__package__reviewed=True).select_related(
|
||||
"compound__package"
|
||||
)
|
||||
|
||||
qs = CompoundStructure.objects.filter(
|
||||
compound__package__in=PackageManager.get_all_readable_packages(user, include_reviewed=True)
|
||||
).select_related("compound__package")
|
||||
return qs
|
||||
|
||||
|
||||
def get_package_compound_structure_for_read(
|
||||
package_uuid: UUID, compound_uuid: UUID, user: User | None = None
|
||||
):
|
||||
"""Build queryset for specific package compound structures."""
|
||||
|
||||
get_package_for_read(user, package_uuid)
|
||||
compound = get_compound_for_read(user, compound_uuid)
|
||||
|
||||
qs = CompoundStructure.objects.filter(compound=compound).select_related("compound__package")
|
||||
return qs
|
||||
0
epapi/v1/endpoints/__init__.py
Normal file
174
epapi/v1/endpoints/additional_information.py
Normal file
@ -0,0 +1,174 @@
|
||||
from ninja import Router, Body
|
||||
from ninja.errors import HttpError
|
||||
from uuid import UUID
|
||||
from pydantic import ValidationError
|
||||
from typing import Dict, Any
|
||||
import logging
|
||||
|
||||
from envipy_additional_information import registry
|
||||
from envipy_additional_information.groups import GroupEnum
|
||||
from epapi.utils.schema_transformers import build_rjsf_output
|
||||
from epapi.utils.validation_errors import handle_validation_error
|
||||
from ..dal import get_scenario_for_read, get_scenario_for_write
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = Router(tags=["Additional Information"])
|
||||
|
||||
|
||||
@router.get("/information/schema/")
|
||||
def list_all_schemas(request):
|
||||
"""Return all schemas in RJSF format with lowercase class names as keys."""
|
||||
result = {}
|
||||
for name, cls in registry.list_models().items():
|
||||
try:
|
||||
result[name] = build_rjsf_output(cls)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to generate schema for {name}: {e}")
|
||||
continue
|
||||
return result
|
||||
|
||||
|
||||
@router.get("/information/schema/{model_name}/")
|
||||
def get_model_schema(request, model_name: str):
|
||||
"""Return RJSF schema for specific model."""
|
||||
cls = registry.get_model(model_name.lower())
|
||||
if not cls:
|
||||
raise HttpError(404, f"Unknown model: {model_name}")
|
||||
return build_rjsf_output(cls)
|
||||
|
||||
|
||||
@router.get("/scenario/{uuid:scenario_uuid}/information/")
|
||||
def list_scenario_info(request, scenario_uuid: UUID):
|
||||
"""List all additional information for a scenario"""
|
||||
scenario = get_scenario_for_read(request.user, scenario_uuid)
|
||||
|
||||
result = []
|
||||
for ai in scenario.get_additional_information():
|
||||
result.append(
|
||||
{
|
||||
"type": ai.__class__.__name__,
|
||||
"uuid": getattr(ai, "uuid", None),
|
||||
"data": ai.model_dump(mode="json"),
|
||||
}
|
||||
)
|
||||
return result
|
||||
|
||||
|
||||
@router.post("/scenario/{uuid:scenario_uuid}/information/{model_name}/")
|
||||
def add_scenario_info(
|
||||
request, scenario_uuid: UUID, model_name: str, payload: Dict[str, Any] = Body(...)
|
||||
):
|
||||
"""Add new additional information to scenario"""
|
||||
cls = registry.get_model(model_name.lower())
|
||||
if not cls:
|
||||
raise HttpError(404, f"Unknown model: {model_name}")
|
||||
|
||||
try:
|
||||
instance = cls(**payload) # Pydantic validates
|
||||
except ValidationError as e:
|
||||
handle_validation_error(e)
|
||||
|
||||
scenario = get_scenario_for_write(request.user, scenario_uuid)
|
||||
|
||||
# Model method now returns the UUID
|
||||
created_uuid = scenario.add_additional_information(instance)
|
||||
|
||||
return {"status": "created", "uuid": created_uuid}
|
||||
|
||||
|
||||
@router.patch("/scenario/{uuid:scenario_uuid}/information/item/{uuid:ai_uuid}/")
|
||||
def update_scenario_info(
|
||||
request, scenario_uuid: UUID, ai_uuid: UUID, payload: Dict[str, Any] = Body(...)
|
||||
):
|
||||
"""Update existing additional information for a scenario"""
|
||||
scenario = get_scenario_for_write(request.user, scenario_uuid)
|
||||
ai_uuid_str = str(ai_uuid)
|
||||
|
||||
# Find item to determine type for validation
|
||||
found_type = None
|
||||
for type_name, items in scenario.additional_information.items():
|
||||
if any(item.get("uuid") == ai_uuid_str for item in items):
|
||||
found_type = type_name
|
||||
break
|
||||
|
||||
if found_type is None:
|
||||
raise HttpError(404, f"Additional information not found: {ai_uuid}")
|
||||
|
||||
# Get the model class for validation
|
||||
cls = registry.get_model(found_type.lower())
|
||||
if not cls:
|
||||
raise HttpError(500, f"Unknown model type in data: {found_type}")
|
||||
|
||||
# Validate the payload against the model
|
||||
try:
|
||||
instance = cls(**payload)
|
||||
except ValidationError as e:
|
||||
handle_validation_error(e)
|
||||
|
||||
# Use model method for update
|
||||
try:
|
||||
scenario.update_additional_information(ai_uuid_str, instance)
|
||||
except ValueError as e:
|
||||
raise HttpError(404, str(e))
|
||||
|
||||
return {"status": "updated", "uuid": ai_uuid_str}
|
||||
|
||||
|
||||
@router.delete("/scenario/{uuid:scenario_uuid}/information/item/{uuid:ai_uuid}/")
|
||||
def delete_scenario_info(request, scenario_uuid: UUID, ai_uuid: UUID):
|
||||
"""Delete additional information from scenario"""
|
||||
scenario = get_scenario_for_write(request.user, scenario_uuid)
|
||||
|
||||
try:
|
||||
scenario.remove_additional_information(str(ai_uuid))
|
||||
except ValueError as e:
|
||||
raise HttpError(404, str(e))
|
||||
|
||||
return {"status": "deleted"}
|
||||
|
||||
|
||||
@router.get("/information/groups/")
|
||||
def list_groups(request):
|
||||
"""Return list of available group names."""
|
||||
return {"groups": GroupEnum.values()}
|
||||
|
||||
|
||||
@router.get("/information/groups/{group_name}/")
|
||||
def get_group_models(request, group_name: str):
|
||||
"""
|
||||
Return models for a specific group organized by subcategory.
|
||||
|
||||
Args:
|
||||
group_name: One of "sludge", "soil", or "sediment" (string)
|
||||
|
||||
Returns:
|
||||
Dictionary with subcategories (exp, spike, comp, misc, or group name)
|
||||
as keys and lists of model info as values
|
||||
"""
|
||||
# Convert string to enum (raises ValueError if invalid)
|
||||
try:
|
||||
group_enum = GroupEnum(group_name)
|
||||
except ValueError:
|
||||
valid = ", ".join(GroupEnum.values())
|
||||
raise HttpError(400, f"Invalid group '{group_name}'. Valid: {valid}")
|
||||
|
||||
try:
|
||||
group_data = registry.collect_group(group_enum)
|
||||
except (ValueError, TypeError) as e:
|
||||
raise HttpError(400, str(e))
|
||||
|
||||
result = {}
|
||||
for subcategory, models in group_data.items():
|
||||
result[subcategory] = [
|
||||
{
|
||||
"name": cls.__name__.lower(),
|
||||
"class": cls.__name__,
|
||||
"title": getattr(cls.UI, "title", cls.__name__)
|
||||
if hasattr(cls, "UI")
|
||||
else cls.__name__,
|
||||
}
|
||||
for cls in models
|
||||
]
|
||||
|
||||
return result
|
||||
41
epapi/v1/endpoints/compounds.py
Normal file
@ -0,0 +1,41 @@
|
||||
from django.conf import settings as s
|
||||
from ninja import Router
|
||||
from ninja_extra.pagination import paginate
|
||||
from uuid import UUID
|
||||
|
||||
from epdb.models import Compound
|
||||
from ..pagination import EnhancedPageNumberPagination
|
||||
from ..schemas import CompoundOutSchema, ReviewStatusFilter
|
||||
from ..dal import get_user_entities_for_read, get_package_entities_for_read
|
||||
|
||||
router = Router()
|
||||
|
||||
|
||||
@router.get("/compounds/", response=EnhancedPageNumberPagination.Output[CompoundOutSchema])
|
||||
@paginate(
|
||||
EnhancedPageNumberPagination,
|
||||
page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
|
||||
filter_schema=ReviewStatusFilter,
|
||||
)
|
||||
def list_all_compounds(request):
|
||||
"""
|
||||
List all compounds from reviewed packages.
|
||||
"""
|
||||
return get_user_entities_for_read(Compound, request.user).order_by("name").all()
|
||||
|
||||
|
||||
@router.get(
|
||||
"/package/{uuid:package_uuid}/compound/",
|
||||
response=EnhancedPageNumberPagination.Output[CompoundOutSchema],
|
||||
)
|
||||
@paginate(
|
||||
EnhancedPageNumberPagination,
|
||||
page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
|
||||
filter_schema=ReviewStatusFilter,
|
||||
)
|
||||
def list_package_compounds(request, package_uuid: UUID):
|
||||
"""
|
||||
List all compounds for a specific package.
|
||||
"""
|
||||
user = request.user
|
||||
return get_package_entities_for_read(Compound, package_uuid, user).order_by("name").all()
|
||||
41
epapi/v1/endpoints/models.py
Normal file
@ -0,0 +1,41 @@
|
||||
from django.conf import settings as s
|
||||
from ninja import Router
|
||||
from ninja_extra.pagination import paginate
|
||||
from uuid import UUID
|
||||
|
||||
from epdb.models import EPModel
|
||||
from ..pagination import EnhancedPageNumberPagination
|
||||
from ..schemas import ModelOutSchema, ReviewStatusFilter
|
||||
from ..dal import get_user_entities_for_read, get_package_entities_for_read
|
||||
|
||||
router = Router()
|
||||
|
||||
|
||||
@router.get("/models/", response=EnhancedPageNumberPagination.Output[ModelOutSchema])
|
||||
@paginate(
|
||||
EnhancedPageNumberPagination,
|
||||
page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
|
||||
filter_schema=ReviewStatusFilter,
|
||||
)
|
||||
def list_all_models(request):
|
||||
"""
|
||||
List all models from reviewed packages.
|
||||
"""
|
||||
return get_user_entities_for_read(EPModel, request.user).order_by("name").all()
|
||||
|
||||
|
||||
@router.get(
|
||||
"/package/{uuid:package_uuid}/model/",
|
||||
response=EnhancedPageNumberPagination.Output[ModelOutSchema],
|
||||
)
|
||||
@paginate(
|
||||
EnhancedPageNumberPagination,
|
||||
page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
|
||||
filter_schema=ReviewStatusFilter,
|
||||
)
|
||||
def list_package_models(request, package_uuid: UUID):
|
||||
"""
|
||||
List all models for a specific package.
|
||||
"""
|
||||
user = request.user
|
||||
return get_package_entities_for_read(EPModel, package_uuid, user).order_by("name").all()
|
||||
32
epapi/v1/endpoints/packages.py
Normal file
@ -0,0 +1,32 @@
|
||||
from django.conf import settings as s
|
||||
from ninja import Router
|
||||
from ninja_extra.pagination import paginate
|
||||
import logging
|
||||
|
||||
from ..auth import OptionalBearerTokenAuth
|
||||
from ..dal import get_user_packages_for_read
|
||||
from ..pagination import EnhancedPageNumberPagination
|
||||
from ..schemas import PackageOutSchema, SelfReviewStatusFilter
|
||||
|
||||
router = Router()
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/packages/",
|
||||
response=EnhancedPageNumberPagination.Output[PackageOutSchema],
|
||||
auth=OptionalBearerTokenAuth(),
|
||||
)
|
||||
@paginate(
|
||||
EnhancedPageNumberPagination,
|
||||
page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
|
||||
filter_schema=SelfReviewStatusFilter,
|
||||
)
|
||||
def list_all_packages(request):
|
||||
"""
|
||||
List packages accessible to the user.
|
||||
|
||||
"""
|
||||
user = request.user
|
||||
qs = get_user_packages_for_read(user)
|
||||
return qs.order_by("name").all()
|
||||
42
epapi/v1/endpoints/pathways.py
Normal file
@ -0,0 +1,42 @@
|
||||
from django.conf import settings as s
|
||||
from ninja import Router
|
||||
from ninja_extra.pagination import paginate
|
||||
from uuid import UUID
|
||||
|
||||
from epdb.models import Pathway
|
||||
from ..pagination import EnhancedPageNumberPagination
|
||||
from ..schemas import PathwayOutSchema, ReviewStatusFilter
|
||||
from ..dal import get_user_entities_for_read, get_package_entities_for_read
|
||||
|
||||
router = Router()
|
||||
|
||||
|
||||
@router.get("/pathways/", response=EnhancedPageNumberPagination.Output[PathwayOutSchema])
|
||||
@paginate(
|
||||
EnhancedPageNumberPagination,
|
||||
page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
|
||||
filter_schema=ReviewStatusFilter,
|
||||
)
|
||||
def list_all_pathways(request):
|
||||
"""
|
||||
List all pathways from reviewed packages.
|
||||
"""
|
||||
user = request.user
|
||||
return get_user_entities_for_read(Pathway, user).order_by("name").all()
|
||||
|
||||
|
||||
@router.get(
|
||||
"/package/{uuid:package_uuid}/pathway/",
|
||||
response=EnhancedPageNumberPagination.Output[PathwayOutSchema],
|
||||
)
|
||||
@paginate(
|
||||
EnhancedPageNumberPagination,
|
||||
page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
|
||||
filter_schema=ReviewStatusFilter,
|
||||
)
|
||||
def list_package_pathways(request, package_uuid: UUID):
|
||||
"""
|
||||
List all pathways for a specific package.
|
||||
"""
|
||||
user = request.user
|
||||
return get_package_entities_for_read(Pathway, package_uuid, user).order_by("name").all()
|
||||
42
epapi/v1/endpoints/reactions.py
Normal file
@ -0,0 +1,42 @@
|
||||
from django.conf import settings as s
|
||||
from ninja import Router
|
||||
from ninja_extra.pagination import paginate
|
||||
from uuid import UUID
|
||||
|
||||
from epdb.models import Reaction
|
||||
from ..pagination import EnhancedPageNumberPagination
|
||||
from ..schemas import ReactionOutSchema, ReviewStatusFilter
|
||||
from ..dal import get_user_entities_for_read, get_package_entities_for_read
|
||||
|
||||
router = Router()
|
||||
|
||||
|
||||
@router.get("/reactions/", response=EnhancedPageNumberPagination.Output[ReactionOutSchema])
|
||||
@paginate(
|
||||
EnhancedPageNumberPagination,
|
||||
page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
|
||||
filter_schema=ReviewStatusFilter,
|
||||
)
|
||||
def list_all_reactions(request):
|
||||
"""
|
||||
List all reactions from reviewed packages.
|
||||
"""
|
||||
user = request.user
|
||||
return get_user_entities_for_read(Reaction, user).order_by("name").all()
|
||||
|
||||
|
||||
@router.get(
|
||||
"/package/{uuid:package_uuid}/reaction/",
|
||||
response=EnhancedPageNumberPagination.Output[ReactionOutSchema],
|
||||
)
|
||||
@paginate(
|
||||
EnhancedPageNumberPagination,
|
||||
page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
|
||||
filter_schema=ReviewStatusFilter,
|
||||
)
|
||||
def list_package_reactions(request, package_uuid: UUID):
|
||||
"""
|
||||
List all reactions for a specific package.
|
||||
"""
|
||||
user = request.user
|
||||
return get_package_entities_for_read(Reaction, package_uuid, user).order_by("name").all()
|
||||
42
epapi/v1/endpoints/rules.py
Normal file
@ -0,0 +1,42 @@
|
||||
from django.conf import settings as s
|
||||
from ninja import Router
|
||||
from ninja_extra.pagination import paginate
|
||||
from uuid import UUID
|
||||
|
||||
from epdb.models import Rule
|
||||
from ..pagination import EnhancedPageNumberPagination
|
||||
from ..schemas import ReviewStatusFilter, RuleOutSchema
|
||||
from ..dal import get_user_entities_for_read, get_package_entities_for_read
|
||||
|
||||
router = Router()
|
||||
|
||||
|
||||
@router.get("/rules/", response=EnhancedPageNumberPagination.Output[RuleOutSchema])
|
||||
@paginate(
|
||||
EnhancedPageNumberPagination,
|
||||
page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
|
||||
filter_schema=ReviewStatusFilter,
|
||||
)
|
||||
def list_all_rules(request):
|
||||
"""
|
||||
List all rules from reviewed packages.
|
||||
"""
|
||||
user = request.user
|
||||
return get_user_entities_for_read(Rule, user).order_by("name").all()
|
||||
|
||||
|
||||
@router.get(
|
||||
"/package/{uuid:package_uuid}/rule/",
|
||||
response=EnhancedPageNumberPagination.Output[RuleOutSchema],
|
||||
)
|
||||
@paginate(
|
||||
EnhancedPageNumberPagination,
|
||||
page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
|
||||
filter_schema=ReviewStatusFilter,
|
||||
)
|
||||
def list_package_rules(request, package_uuid: UUID):
|
||||
"""
|
||||
List all rules for a specific package.
|
||||
"""
|
||||
user = request.user
|
||||
return get_package_entities_for_read(Rule, package_uuid, user).order_by("name").all()
|
||||
130
epapi/v1/endpoints/scenarios.py
Normal file
@ -0,0 +1,130 @@
|
||||
from django.conf import settings as s
|
||||
from django.db import IntegrityError, OperationalError, DatabaseError
|
||||
from ninja import Router, Body
|
||||
from ninja.errors import HttpError
|
||||
from ninja_extra.pagination import paginate
|
||||
from uuid import UUID
|
||||
from pydantic import ValidationError
|
||||
import logging
|
||||
import json
|
||||
|
||||
from epdb.models import Scenario
|
||||
from epdb.logic import PackageManager
|
||||
from epdb.views import _anonymous_or_real
|
||||
from ..pagination import EnhancedPageNumberPagination
|
||||
from ..schemas import (
|
||||
ScenarioOutSchema,
|
||||
ScenarioCreateSchema,
|
||||
ScenarioReviewStatusAndRelatedFilter,
|
||||
)
|
||||
from ..dal import get_user_entities_for_read, get_package_entities_for_read
|
||||
from envipy_additional_information import registry
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = Router()
|
||||
|
||||
|
||||
@router.get("/scenarios/", response=EnhancedPageNumberPagination.Output[ScenarioOutSchema])
|
||||
@paginate(
|
||||
EnhancedPageNumberPagination,
|
||||
page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
|
||||
filter_schema=ScenarioReviewStatusAndRelatedFilter,
|
||||
)
|
||||
def list_all_scenarios(request):
|
||||
user = request.user
|
||||
items = get_user_entities_for_read(Scenario, user)
|
||||
return items.order_by("name").all()
|
||||
|
||||
|
||||
@router.get(
|
||||
"/package/{uuid:package_uuid}/scenario/",
|
||||
response=EnhancedPageNumberPagination.Output[ScenarioOutSchema],
|
||||
)
|
||||
@paginate(
|
||||
EnhancedPageNumberPagination,
|
||||
page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
|
||||
filter_schema=ScenarioReviewStatusAndRelatedFilter,
|
||||
)
|
||||
def list_package_scenarios(request, package_uuid: UUID):
|
||||
user = request.user
|
||||
items = get_package_entities_for_read(Scenario, package_uuid, user)
|
||||
return items.order_by("name").all()
|
||||
|
||||
|
||||
@router.post("/package/{uuid:package_uuid}/scenario/", response=ScenarioOutSchema)
|
||||
def create_scenario(request, package_uuid: UUID, payload: ScenarioCreateSchema = Body(...)):
|
||||
"""Create a new scenario with optional additional information."""
|
||||
user = _anonymous_or_real(request)
|
||||
|
||||
try:
|
||||
current_package = PackageManager.get_package_by_id(user, package_uuid)
|
||||
except ValueError as e:
|
||||
error_msg = str(e)
|
||||
if "does not exist" in error_msg:
|
||||
raise HttpError(404, f"Package not found: {package_uuid}")
|
||||
elif "Insufficient permissions" in error_msg:
|
||||
raise HttpError(403, "You do not have permission to access this package")
|
||||
else:
|
||||
logger.error(f"Unexpected ValueError from get_package_by_id: {error_msg}")
|
||||
raise HttpError(400, "Invalid package request")
|
||||
|
||||
# Build additional information models from payload
|
||||
additional_information_models = []
|
||||
validation_errors = []
|
||||
|
||||
for ai_item in payload.additional_information:
|
||||
# Get model class from registry
|
||||
model_cls = registry.get_model(ai_item.type.lower())
|
||||
if not model_cls:
|
||||
validation_errors.append(f"Unknown additional information type: {ai_item.type}")
|
||||
continue
|
||||
|
||||
try:
|
||||
# Validate and create model instance
|
||||
instance = model_cls(**ai_item.data)
|
||||
additional_information_models.append(instance)
|
||||
except ValidationError as e:
|
||||
# Collect validation errors to return to user
|
||||
error_messages = [err.get("msg", "Validation error") for err in e.errors()]
|
||||
validation_errors.append(f"{ai_item.type}: {', '.join(error_messages)}")
|
||||
except (TypeError, AttributeError, KeyError) as e:
|
||||
logger.warning(f"Failed to instantiate {ai_item.type} model: {str(e)}")
|
||||
validation_errors.append(f"{ai_item.type}: Invalid data structure - {str(e)}")
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error instantiating {ai_item.type}: {str(e)}")
|
||||
validation_errors.append(f"{ai_item.type}: Failed to process - please check your data")
|
||||
|
||||
# If there are validation errors, return them
|
||||
if validation_errors:
|
||||
raise HttpError(
|
||||
400,
|
||||
json.dumps(
|
||||
{
|
||||
"error": "Validation errors in additional information",
|
||||
"details": validation_errors,
|
||||
}
|
||||
),
|
||||
)
|
||||
|
||||
# Create scenario using the existing Scenario.create method
|
||||
try:
|
||||
new_scenario = Scenario.create(
|
||||
package=current_package,
|
||||
name=payload.name,
|
||||
description=payload.description,
|
||||
scenario_date=payload.scenario_date,
|
||||
scenario_type=payload.scenario_type,
|
||||
additional_information=additional_information_models,
|
||||
)
|
||||
except IntegrityError as e:
|
||||
logger.error(f"Database integrity error creating scenario: {str(e)}")
|
||||
raise HttpError(400, "Scenario creation failed - data constraint violation")
|
||||
except OperationalError as e:
|
||||
logger.error(f"Database operational error creating scenario: {str(e)}")
|
||||
raise HttpError(503, "Database temporarily unavailable - please try again")
|
||||
except (DatabaseError, AttributeError) as e:
|
||||
logger.error(f"Error creating scenario: {str(e)}")
|
||||
raise HttpError(500, "Failed to create scenario due to database error")
|
||||
|
||||
return new_scenario
|
||||
23
epapi/v1/endpoints/settings.py
Normal file
@ -0,0 +1,23 @@
|
||||
from django.conf import settings as s
|
||||
from ninja import Router
|
||||
from ninja_extra.pagination import paginate
|
||||
|
||||
from epdb.logic import SettingManager
|
||||
|
||||
from ..pagination import EnhancedPageNumberPagination
|
||||
from ..schemas import SettingOutSchema
|
||||
|
||||
router = Router()
|
||||
|
||||
|
||||
@router.get("/settings/", response=EnhancedPageNumberPagination.Output[SettingOutSchema])
|
||||
@paginate(
|
||||
EnhancedPageNumberPagination,
|
||||
page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
|
||||
)
|
||||
def list_all_pathways(request):
|
||||
"""
|
||||
List all pathways from reviewed packages.
|
||||
"""
|
||||
user = request.user
|
||||
return SettingManager.get_all_settings(user)
|
||||
50
epapi/v1/endpoints/structure.py
Normal file
@ -0,0 +1,50 @@
|
||||
from django.conf import settings as s
|
||||
from ninja import Router
|
||||
from ninja_extra.pagination import paginate
|
||||
from uuid import UUID
|
||||
|
||||
from ..pagination import EnhancedPageNumberPagination
|
||||
from ..schemas import CompoundStructureOutSchema, StructureReviewStatusFilter
|
||||
from ..dal import (
|
||||
get_user_structure_for_read,
|
||||
get_package_compound_structure_for_read,
|
||||
)
|
||||
|
||||
router = Router()
|
||||
|
||||
|
||||
@router.get(
|
||||
"/structures/", response=EnhancedPageNumberPagination.Output[CompoundStructureOutSchema]
|
||||
)
|
||||
@paginate(
|
||||
EnhancedPageNumberPagination,
|
||||
page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
|
||||
filter_schema=StructureReviewStatusFilter,
|
||||
)
|
||||
def list_all_structures(request):
|
||||
"""
|
||||
List all structures from all packages.
|
||||
"""
|
||||
user = request.user
|
||||
return get_user_structure_for_read(user).order_by("name").all()
|
||||
|
||||
|
||||
@router.get(
|
||||
"/package/{uuid:package_uuid}/compound/{uuid:compound_uuid}/structure/",
|
||||
response=EnhancedPageNumberPagination.Output[CompoundStructureOutSchema],
|
||||
)
|
||||
@paginate(
|
||||
EnhancedPageNumberPagination,
|
||||
page_size=s.API_PAGINATION_DEFAULT_PAGE_SIZE,
|
||||
filter_schema=StructureReviewStatusFilter,
|
||||
)
|
||||
def list_package_structures(request, package_uuid: UUID, compound_uuid: UUID):
|
||||
"""
|
||||
List all structures for a specific package and compound.
|
||||
"""
|
||||
user = request.user
|
||||
return (
|
||||
get_package_compound_structure_for_read(package_uuid, compound_uuid, user)
|
||||
.order_by("name")
|
||||
.all()
|
||||
)
|
||||
28
epapi/v1/errors.py
Normal file
@ -0,0 +1,28 @@
|
||||
from ninja.errors import HttpError
|
||||
|
||||
|
||||
class EPAPIError(HttpError):
|
||||
status_code: int = 500
|
||||
|
||||
def __init__(self, message: str) -> None:
|
||||
super().__init__(status_code=self.status_code, message=message)
|
||||
|
||||
@classmethod
|
||||
def from_exception(cls, exc: Exception):
|
||||
return cls(message=str(exc))
|
||||
|
||||
|
||||
class EPAPIUnauthorizedError(EPAPIError):
|
||||
status_code = 401
|
||||
|
||||
|
||||
class EPAPIPermissionDeniedError(EPAPIError):
|
||||
status_code = 403
|
||||
|
||||
|
||||
class EPAPINotFoundError(EPAPIError):
|
||||
status_code = 404
|
||||
|
||||
|
||||
class EPAPIValidationError(EPAPIError):
|
||||
status_code = 422
|
||||
60
epapi/v1/pagination.py
Normal file
@ -0,0 +1,60 @@
|
||||
import math
|
||||
from typing import Any, Generic, List, TypeVar
|
||||
|
||||
from django.db.models import QuerySet
|
||||
from ninja import Schema
|
||||
from ninja.pagination import PageNumberPagination
|
||||
|
||||
T = TypeVar("T")
|
||||
|
||||
|
||||
class EnhancedPageNumberPagination(PageNumberPagination):
|
||||
class Output(Schema, Generic[T]):
|
||||
items: List[T]
|
||||
page: int
|
||||
page_size: int
|
||||
total_items: int
|
||||
total_pages: int
|
||||
|
||||
def paginate_queryset(
|
||||
self,
|
||||
queryset: QuerySet,
|
||||
pagination: PageNumberPagination.Input,
|
||||
**params: Any,
|
||||
) -> Any:
|
||||
page_size = self._get_page_size(pagination.page_size)
|
||||
offset = (pagination.page - 1) * page_size
|
||||
total_items = self._items_count(queryset)
|
||||
total_pages = math.ceil(total_items / page_size) if page_size > 0 else 0
|
||||
|
||||
return {
|
||||
"items": queryset[offset : offset + page_size],
|
||||
"page": pagination.page,
|
||||
"page_size": page_size,
|
||||
"total_items": total_items,
|
||||
"total_pages": total_pages,
|
||||
}
|
||||
|
||||
async def apaginate_queryset(
|
||||
self,
|
||||
queryset: QuerySet,
|
||||
pagination: PageNumberPagination.Input,
|
||||
**params: Any,
|
||||
) -> Any:
|
||||
page_size = self._get_page_size(pagination.page_size)
|
||||
offset = (pagination.page - 1) * page_size
|
||||
total_items = await self._aitems_count(queryset)
|
||||
total_pages = math.ceil(total_items / page_size) if page_size > 0 else 0
|
||||
|
||||
if isinstance(queryset, QuerySet):
|
||||
items = [obj async for obj in queryset[offset : offset + page_size]]
|
||||
else:
|
||||
items = queryset[offset : offset + page_size]
|
||||
|
||||
return {
|
||||
"items": items,
|
||||
"page": pagination.page,
|
||||
"page_size": page_size,
|
||||
"total_items": total_items,
|
||||
"total_pages": total_pages,
|
||||
}
|
||||
36
epapi/v1/router.py
Normal file
@ -0,0 +1,36 @@
|
||||
from ninja import Router
|
||||
from ninja.security import SessionAuth
|
||||
|
||||
from .auth import BearerTokenAuth
|
||||
from .endpoints import (
|
||||
packages,
|
||||
scenarios,
|
||||
compounds,
|
||||
rules,
|
||||
reactions,
|
||||
pathways,
|
||||
models,
|
||||
structure,
|
||||
additional_information,
|
||||
settings,
|
||||
)
|
||||
|
||||
# Main router with authentication
|
||||
router = Router(
|
||||
auth=[
|
||||
SessionAuth(),
|
||||
BearerTokenAuth(),
|
||||
]
|
||||
)
|
||||
|
||||
# Include all endpoint routers
|
||||
router.add_router("", packages.router)
|
||||
router.add_router("", scenarios.router)
|
||||
router.add_router("", compounds.router)
|
||||
router.add_router("", rules.router)
|
||||
router.add_router("", reactions.router)
|
||||
router.add_router("", pathways.router)
|
||||
router.add_router("", models.router)
|
||||
router.add_router("", structure.router)
|
||||
router.add_router("", additional_information.router)
|
||||
router.add_router("", settings.router)
|
||||
134
epapi/v1/schemas.py
Normal file
@ -0,0 +1,134 @@
|
||||
from ninja import FilterSchema, FilterLookup, Schema
|
||||
from typing import Annotated, Optional, List, Dict, Any
|
||||
from uuid import UUID
|
||||
|
||||
|
||||
# Filter schema for query parameters
|
||||
class ReviewStatusFilter(FilterSchema):
|
||||
"""Filter schema for review_status query parameter."""
|
||||
|
||||
review_status: Annotated[Optional[bool], FilterLookup("package__reviewed")] = None
|
||||
|
||||
|
||||
class SelfReviewStatusFilter(FilterSchema):
|
||||
"""Filter schema for review_status query parameter on self-reviewed entities."""
|
||||
|
||||
review_status: Annotated[Optional[bool], FilterLookup("reviewed")] = None
|
||||
|
||||
|
||||
class StructureReviewStatusFilter(FilterSchema):
|
||||
"""Filter schema for review_status on structures (via compound->package)."""
|
||||
|
||||
review_status: Annotated[Optional[bool], FilterLookup("compound__package__reviewed")] = None
|
||||
|
||||
|
||||
class ScenarioReviewStatusAndRelatedFilter(ReviewStatusFilter):
|
||||
"""Filter schema for review_status and parent query parameter."""
|
||||
|
||||
exclude_related: Annotated[Optional[bool], FilterLookup("parent__isnull")] = None
|
||||
|
||||
|
||||
# Base schema for all package-scoped entities
|
||||
class PackageEntityOutSchema(Schema):
|
||||
"""Base schema for entities belonging to a package."""
|
||||
|
||||
uuid: UUID
|
||||
url: str = ""
|
||||
name: str
|
||||
description: str
|
||||
review_status: str = ""
|
||||
package: str = ""
|
||||
|
||||
@staticmethod
|
||||
def resolve_url(obj):
|
||||
return obj.url
|
||||
|
||||
@staticmethod
|
||||
def resolve_package(obj):
|
||||
return obj.package.url
|
||||
|
||||
@staticmethod
|
||||
def resolve_review_status(obj):
|
||||
return "reviewed" if obj.package.reviewed else "unreviewed"
|
||||
|
||||
|
||||
# All package-scoped entities inherit from base
|
||||
class ScenarioOutSchema(PackageEntityOutSchema):
|
||||
pass
|
||||
|
||||
|
||||
class AdditionalInformationItemSchema(Schema):
|
||||
"""Schema for additional information item in scenario creation."""
|
||||
|
||||
type: str
|
||||
data: Dict[str, Any]
|
||||
|
||||
|
||||
class ScenarioCreateSchema(Schema):
|
||||
"""Schema for creating a new scenario."""
|
||||
|
||||
name: str
|
||||
description: str = ""
|
||||
scenario_date: str = "No date"
|
||||
scenario_type: str = "Not specified"
|
||||
additional_information: List[AdditionalInformationItemSchema] = []
|
||||
|
||||
|
||||
class CompoundOutSchema(PackageEntityOutSchema):
|
||||
pass
|
||||
|
||||
|
||||
class RuleOutSchema(PackageEntityOutSchema):
|
||||
pass
|
||||
|
||||
|
||||
class ReactionOutSchema(PackageEntityOutSchema):
|
||||
pass
|
||||
|
||||
|
||||
class PathwayOutSchema(PackageEntityOutSchema):
|
||||
pass
|
||||
|
||||
|
||||
class ModelOutSchema(PackageEntityOutSchema):
|
||||
pass
|
||||
|
||||
|
||||
class CompoundStructureOutSchema(PackageEntityOutSchema):
|
||||
compound: str = ""
|
||||
|
||||
@staticmethod
|
||||
def resolve_compound(obj):
|
||||
return obj.compound.url
|
||||
|
||||
@staticmethod
|
||||
def resolve_package(obj):
|
||||
return obj.compound.package.url
|
||||
|
||||
@staticmethod
|
||||
def resolve_review_status(obj):
|
||||
return "reviewed" if obj.compound.package.reviewed else "unreviewed"
|
||||
|
||||
|
||||
# Package is special (no package FK)
|
||||
class PackageOutSchema(Schema):
|
||||
uuid: UUID
|
||||
url: str = ""
|
||||
name: str
|
||||
description: str
|
||||
review_status: str = ""
|
||||
|
||||
@staticmethod
|
||||
def resolve_url(obj):
|
||||
return obj.url
|
||||
|
||||
@staticmethod
|
||||
def resolve_review_status(obj):
|
||||
return "reviewed" if obj.reviewed else "unreviewed"
|
||||
|
||||
|
||||
class SettingOutSchema(Schema):
|
||||
uuid: UUID
|
||||
url: str = ""
|
||||
name: str
|
||||
description: str
|
||||
@ -1,31 +1,34 @@
|
||||
from django.conf import settings as s
|
||||
from django.contrib import admin
|
||||
|
||||
from .models import (
|
||||
User,
|
||||
UserPackagePermission,
|
||||
Group,
|
||||
GroupPackagePermission,
|
||||
Package,
|
||||
MLRelativeReasoning,
|
||||
EnviFormer,
|
||||
Compound,
|
||||
CompoundStructure,
|
||||
SimpleAmbitRule,
|
||||
ParallelRule,
|
||||
Reaction,
|
||||
Pathway,
|
||||
Node,
|
||||
Edge,
|
||||
Scenario,
|
||||
Setting,
|
||||
EnviFormer,
|
||||
ExternalDatabase,
|
||||
ExternalIdentifier,
|
||||
Group,
|
||||
GroupPackagePermission,
|
||||
JobLog,
|
||||
License,
|
||||
MLRelativeReasoning,
|
||||
Node,
|
||||
ParallelRule,
|
||||
Pathway,
|
||||
Reaction,
|
||||
Scenario,
|
||||
Setting,
|
||||
SimpleAmbitRule,
|
||||
User,
|
||||
UserPackagePermission,
|
||||
)
|
||||
|
||||
Package = s.GET_PACKAGE_MODEL()
|
||||
|
||||
|
||||
class UserAdmin(admin.ModelAdmin):
|
||||
list_display = ["username", "email", "is_active"]
|
||||
list_display = ["username", "email", "is_active", "is_staff", "is_superuser"]
|
||||
|
||||
|
||||
class UserPackagePermissionAdmin(admin.ModelAdmin):
|
||||
@ -45,7 +48,7 @@ class JobLogAdmin(admin.ModelAdmin):
|
||||
|
||||
|
||||
class EPAdmin(admin.ModelAdmin):
|
||||
search_fields = ["name", "description"]
|
||||
search_fields = ["name", "description", "url", "uuid"]
|
||||
list_display = ["name", "url", "created"]
|
||||
ordering = ["-created"]
|
||||
|
||||
@ -62,6 +65,10 @@ class EnviFormerAdmin(EPAdmin):
|
||||
pass
|
||||
|
||||
|
||||
class LicenseAdmin(admin.ModelAdmin):
|
||||
list_display = ["cc_string", "link", "image_link"]
|
||||
|
||||
|
||||
class CompoundAdmin(EPAdmin):
|
||||
pass
|
||||
|
||||
@ -118,6 +125,7 @@ admin.site.register(JobLog, JobLogAdmin)
|
||||
admin.site.register(Package, PackageAdmin)
|
||||
admin.site.register(MLRelativeReasoning, MLRelativeReasoningAdmin)
|
||||
admin.site.register(EnviFormer, EnviFormerAdmin)
|
||||
admin.site.register(License, LicenseAdmin)
|
||||
admin.site.register(Compound, CompoundAdmin)
|
||||
admin.site.register(CompoundStructure, CompoundStructureAdmin)
|
||||
admin.site.register(SimpleAmbitRule, SimpleAmbitRuleAdmin)
|
||||
|
||||
14
epdb/api.py
@ -2,20 +2,12 @@ from typing import List
|
||||
|
||||
from django.contrib.auth import get_user_model
|
||||
from ninja import Router, Schema, Field
|
||||
from ninja.errors import HttpError
|
||||
from ninja.pagination import paginate
|
||||
from ninja.security import HttpBearer
|
||||
|
||||
from epapi.v1.auth import BearerTokenAuth
|
||||
|
||||
from .logic import PackageManager
|
||||
from .models import User, Compound, APIToken
|
||||
|
||||
|
||||
class BearerTokenAuth(HttpBearer):
|
||||
def authenticate(self, request, token):
|
||||
for token_obj in APIToken.objects.select_related("user").all():
|
||||
if token_obj.check_token(token) and token_obj.is_valid():
|
||||
return token_obj.user
|
||||
raise HttpError(401, "Invalid or expired token")
|
||||
from .models import User, Compound
|
||||
|
||||
|
||||
def _anonymous_or_real(request):
|
||||
|
||||
@ -1,4 +1,9 @@
|
||||
import logging
|
||||
|
||||
from django.apps import AppConfig
|
||||
from django.conf import settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class EPDBConfig(AppConfig):
|
||||
@ -7,3 +12,6 @@ class EPDBConfig(AppConfig):
|
||||
|
||||
def ready(self):
|
||||
import epdb.signals # noqa: F401
|
||||
|
||||
model_name = getattr(settings, "EPDB_PACKAGE_MODEL", "epdb.Package")
|
||||
logger.info(f"Using Package model: {model_name}")
|
||||
|
||||
32
epdb/context_processors.py
Normal file
@ -0,0 +1,32 @@
|
||||
"""
|
||||
Context processors for enviPy application.
|
||||
|
||||
Context processors automatically make variables available to all templates.
|
||||
"""
|
||||
|
||||
from .logic import PackageManager
|
||||
from django.conf import settings as s
|
||||
|
||||
|
||||
def package_context(request):
|
||||
"""
|
||||
Provides package data for the search modal which is included globally
|
||||
in framework_modern.html.
|
||||
|
||||
Returns:
|
||||
dict: Context dictionary with reviewed and unreviewed packages
|
||||
"""
|
||||
current_user = request.user
|
||||
|
||||
reviewed_package_qs = PackageManager.get_reviewed_packages()
|
||||
|
||||
unreviewed_package_qs = s.GET_PACKAGE_MODEL().objects.none()
|
||||
|
||||
# Only get user-specific packages if user is authenticated
|
||||
if current_user.is_authenticated:
|
||||
unreviewed_package_qs = PackageManager.get_all_readable_packages(current_user)
|
||||
|
||||
return {
|
||||
"reviewed_packages": reviewed_package_qs,
|
||||
"unreviewed_packages": unreviewed_package_qs,
|
||||
}
|
||||
359
epdb/logic.py
@ -1,39 +1,41 @@
|
||||
import re
|
||||
import logging
|
||||
import json
|
||||
from typing import Union, List, Optional, Set, Dict, Any
|
||||
import logging
|
||||
import re
|
||||
from typing import Any, Dict, List, Optional, Set, Union, Tuple
|
||||
from uuid import UUID
|
||||
|
||||
import nh3
|
||||
from django.conf import settings as s
|
||||
from django.contrib.auth import get_user_model
|
||||
from django.db import transaction
|
||||
from django.conf import settings as s
|
||||
from pydantic import ValidationError
|
||||
|
||||
from epdb.models import (
|
||||
User,
|
||||
Package,
|
||||
UserPackagePermission,
|
||||
GroupPackagePermission,
|
||||
Permission,
|
||||
Group,
|
||||
Setting,
|
||||
EPModel,
|
||||
UserSettingPermission,
|
||||
Rule,
|
||||
Pathway,
|
||||
Node,
|
||||
Edge,
|
||||
Compound,
|
||||
Reaction,
|
||||
CompoundStructure,
|
||||
Edge,
|
||||
EnzymeLink,
|
||||
EPModel,
|
||||
ExpansionSchemeChoice,
|
||||
Group,
|
||||
GroupPackagePermission,
|
||||
Node,
|
||||
Pathway,
|
||||
Permission,
|
||||
Reaction,
|
||||
Rule,
|
||||
Setting,
|
||||
User,
|
||||
UserPackagePermission,
|
||||
UserSettingPermission,
|
||||
)
|
||||
from utilities.chem import FormatConverter
|
||||
from utilities.misc import PackageImporter, PackageExporter
|
||||
from utilities.misc import PackageExporter, PackageImporter
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
Package = s.GET_PACKAGE_MODEL()
|
||||
|
||||
|
||||
class EPDBURLParser:
|
||||
UUID_PATTERN = r"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}"
|
||||
@ -192,8 +194,6 @@ class UserManager(object):
|
||||
if clean_username != username or clean_email != email:
|
||||
# This will be caught by the try in view.py/register
|
||||
raise ValueError("Invalid username or password")
|
||||
# avoid circular import :S
|
||||
from .tasks import send_registration_mail
|
||||
|
||||
extra_fields = {"is_active": not s.ADMIN_APPROVAL_REQUIRED}
|
||||
|
||||
@ -212,10 +212,6 @@ class UserManager(object):
|
||||
u.default_package = p
|
||||
u.save()
|
||||
|
||||
if not u.is_active:
|
||||
# send email for verification
|
||||
send_registration_mail.delay(u.pk)
|
||||
|
||||
if set_setting:
|
||||
u.default_setting = Setting.objects.get(global_default=True)
|
||||
u.save()
|
||||
@ -442,6 +438,7 @@ class PackageManager(object):
|
||||
if PackageManager.readable(user, p):
|
||||
return p
|
||||
else:
|
||||
# FIXME: use custom exception to be translatable to 403 in API
|
||||
raise ValueError(
|
||||
"Insufficient permissions to access Package with ID {}".format(package_id)
|
||||
)
|
||||
@ -578,30 +575,39 @@ class PackageManager(object):
|
||||
else:
|
||||
_ = perm_cls.objects.update_or_create(defaults={"permission": new_perm}, **data)
|
||||
|
||||
@staticmethod
|
||||
def grant_read(caller: User, package: Package, grantee: Union[User, Group]):
|
||||
PackageManager.update_permissions(caller, package, grantee, Permission.READ[0])
|
||||
|
||||
@staticmethod
|
||||
def grant_write(caller: User, package: Package, grantee: Union[User, Group]):
|
||||
PackageManager.update_permissions(caller, package, grantee, Permission.WRITE[0])
|
||||
|
||||
@staticmethod
|
||||
@transaction.atomic
|
||||
def import_legacy_package(
|
||||
data: dict, owner: User, keep_ids=False, add_import_timestamp=True, trust_reviewed=False
|
||||
):
|
||||
from uuid import UUID, uuid4
|
||||
from datetime import datetime
|
||||
from collections import defaultdict
|
||||
from datetime import datetime
|
||||
from uuid import UUID, uuid4
|
||||
|
||||
from envipy_additional_information import AdditionalInformationConverter
|
||||
|
||||
from .models import (
|
||||
Package,
|
||||
Compound,
|
||||
CompoundStructure,
|
||||
SimpleRule,
|
||||
SimpleAmbitRule,
|
||||
Edge,
|
||||
Node,
|
||||
ParallelRule,
|
||||
Pathway,
|
||||
Reaction,
|
||||
Scenario,
|
||||
SequentialRule,
|
||||
SequentialRuleOrdering,
|
||||
Reaction,
|
||||
Pathway,
|
||||
Node,
|
||||
Edge,
|
||||
Scenario,
|
||||
SimpleAmbitRule,
|
||||
SimpleRule,
|
||||
)
|
||||
from envipy_additional_information import AdditionalInformationConverter
|
||||
|
||||
pack = Package()
|
||||
pack.uuid = UUID(data["id"].split("/")[-1]) if keep_ids else uuid4()
|
||||
@ -673,7 +679,7 @@ class PackageManager(object):
|
||||
ai_data = json.loads(res.model_dump_json())
|
||||
ai_data["uuid"] = f"{uuid4()}"
|
||||
new_add_inf[res_cls_name].append(ai_data)
|
||||
except ValidationError:
|
||||
except (ValidationError, ValueError):
|
||||
logger.error(f"Failed to convert {name} with {addinf_data}")
|
||||
|
||||
scen.additional_information = new_add_inf
|
||||
@ -1106,6 +1112,7 @@ class SettingManager(object):
|
||||
rule_packages: List[Package] = None,
|
||||
model: EPModel = None,
|
||||
model_threshold: float = None,
|
||||
expansion_scheme: ExpansionSchemeChoice = ExpansionSchemeChoice.BFS,
|
||||
):
|
||||
new_s = Setting()
|
||||
# Clean for potential XSS
|
||||
@ -1388,6 +1395,9 @@ class SEdge(object):
|
||||
self.rule = rule
|
||||
self.probability = probability
|
||||
|
||||
def product_smiles(self):
|
||||
return [p.smiles for p in self.products]
|
||||
|
||||
def __hash__(self):
|
||||
full_hash = 0
|
||||
|
||||
@ -1473,6 +1483,7 @@ class SPathway(object):
|
||||
self.smiles_to_node: Dict[str, SNode] = dict(**{n.smiles: n for n in self.root_nodes})
|
||||
self.edges: Set["SEdge"] = set()
|
||||
self.done = False
|
||||
self.empty_due_to_threshold = False
|
||||
|
||||
@staticmethod
|
||||
def from_pathway(pw: "Pathway", persist: bool = True):
|
||||
@ -1537,6 +1548,207 @@ class SPathway(object):
|
||||
|
||||
return sorted(res, key=lambda x: hash(x))
|
||||
|
||||
def _expand(self, substrates: List[SNode]) -> Tuple[List[SNode], List[SEdge]]:
|
||||
"""
|
||||
Expands the given substrates by generating new nodes and edges based on prediction settings.
|
||||
|
||||
This method processes a list of substrates and expands them into new nodes and edges using defined
|
||||
rules and settings. It evaluates each substrate to determine its applicability domain, persists
|
||||
domain assessments, and generates candidates for further processing. Newly created nodes and edges
|
||||
are returned, and any applicable information is stored or updated internally during the process.
|
||||
|
||||
Parameters:
|
||||
substrates (List[SNode]): A list of substrate nodes to be expanded.
|
||||
|
||||
Returns:
|
||||
Tuple[List[SNode], List[SEdge]]:
|
||||
A tuple containing:
|
||||
- A list of new nodes generated during the expansion.
|
||||
- A list of new edges representing connections between nodes based on candidate reactions.
|
||||
|
||||
Raises:
|
||||
ValueError: If a node does not have an ID when it should have been saved already.
|
||||
"""
|
||||
new_nodes: List[SNode] = []
|
||||
new_edges: List[SEdge] = []
|
||||
|
||||
for sub in substrates:
|
||||
# For App Domain we have to ensure that each Node is evaluated
|
||||
if sub.app_domain_assessment is None:
|
||||
if self.prediction_setting.model:
|
||||
if self.prediction_setting.model.app_domain:
|
||||
app_domain_assessment = self.prediction_setting.model.app_domain.assess(
|
||||
sub.smiles
|
||||
)
|
||||
|
||||
if self.persist is not None:
|
||||
n = self.snode_persist_lookup[sub]
|
||||
|
||||
if n.id is None:
|
||||
raise ValueError(f"Node {n} has no ID... aborting!")
|
||||
|
||||
node_data = n.simple_json()
|
||||
node_data["image"] = f"{n.url}?image=svg"
|
||||
app_domain_assessment["assessment"]["node"] = node_data
|
||||
|
||||
n.kv["app_domain_assessment"] = app_domain_assessment
|
||||
n.save()
|
||||
|
||||
sub.app_domain_assessment = app_domain_assessment
|
||||
|
||||
expansion_result = self.prediction_setting.expand(self, sub)
|
||||
|
||||
# We don't have any substrate, but technically we have at least one rule that triggered.
|
||||
# If our substrate is a root node a.k.a. depth == 0 store that info in SPathway
|
||||
if (
|
||||
len(expansion_result["transformations"]) == 0
|
||||
and expansion_result["rule_triggered"]
|
||||
and sub.depth == 0
|
||||
):
|
||||
self.empty_due_to_threshold = True
|
||||
|
||||
# Emit directly
|
||||
if self.persist is not None:
|
||||
self.persist.kv["empty_due_to_threshold"] = True
|
||||
self.persist.save()
|
||||
|
||||
# candidates is a List of PredictionResult. The length of the List is equal to the number of rules
|
||||
for cand_set in expansion_result["transformations"]:
|
||||
if cand_set:
|
||||
# cand_set is a PredictionResult object that can consist of multiple candidate reactions
|
||||
for cand in cand_set:
|
||||
cand_nodes = []
|
||||
# candidate reactions can have multiple fragments
|
||||
for c in cand:
|
||||
if c not in self.smiles_to_node:
|
||||
# For new nodes do an AppDomain Assessment if an AppDomain is attached
|
||||
app_domain_assessment = None
|
||||
if self.prediction_setting.model:
|
||||
if self.prediction_setting.model.app_domain:
|
||||
app_domain_assessment = (
|
||||
self.prediction_setting.model.app_domain.assess(c)
|
||||
)
|
||||
snode = SNode(c, sub.depth + 1, app_domain_assessment)
|
||||
self.smiles_to_node[c] = snode
|
||||
new_nodes.append(snode)
|
||||
|
||||
node = self.smiles_to_node[c]
|
||||
cand_nodes.append(node)
|
||||
|
||||
edge = SEdge(
|
||||
sub,
|
||||
cand_nodes,
|
||||
rule=cand_set.rule,
|
||||
probability=cand_set.probability,
|
||||
)
|
||||
self.edges.add(edge)
|
||||
new_edges.append(edge)
|
||||
|
||||
return new_nodes, new_edges
|
||||
|
||||
def predict(self):
|
||||
"""
|
||||
Predicts outcomes based on a graph traversal algorithm using the specified expansion schema.
|
||||
|
||||
This method iteratively explores the nodes of a graph starting from the root nodes, propagating
|
||||
probabilities through edges, and updating the probabilities of the connected nodes. The traversal
|
||||
can follow one of three predefined expansion schemas: Depth-First Search (DFS), Breadth-First Search
|
||||
(BFS), or a Greedy approach based on node probabilities. The methodology ensures that all reachable
|
||||
nodes are processed systematically according to the specified schema.
|
||||
|
||||
Errors will be raised if the expansion schema is undefined or invalid. Additionally, this method
|
||||
supports persisting changes by writing back data to the database when configured to do so.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
done : bool
|
||||
A flag indicating whether the prediction process is completed.
|
||||
persist : Any
|
||||
An optional object that manages persistence operations for saving modifications.
|
||||
root_nodes : List[SNode]
|
||||
A collection of initial nodes in the graph from which traversal begins.
|
||||
prediction_setting : Any
|
||||
Configuration object specifying settings for graph traversal, such as the choice of
|
||||
expansion schema.
|
||||
|
||||
Raises
|
||||
------
|
||||
ValueError
|
||||
If an invalid or unknown expansion schema is provided in `prediction_setting`.
|
||||
"""
|
||||
# populate initial queue
|
||||
queue = list(self.root_nodes)
|
||||
processed = set()
|
||||
|
||||
# initial nodes have prob 1.0
|
||||
node_probs: Dict[SNode, float] = {}
|
||||
node_probs.update({n: 1.0 for n in queue})
|
||||
|
||||
while queue:
|
||||
current = queue.pop(0)
|
||||
|
||||
if current in processed:
|
||||
continue
|
||||
|
||||
processed.add(current)
|
||||
|
||||
new_nodes, new_edges = self._expand([current])
|
||||
|
||||
if new_nodes or new_edges:
|
||||
# Check if we need to write back data to the database
|
||||
if self.persist:
|
||||
self._sync_to_pathway()
|
||||
# call save to update the internal modified field
|
||||
self.persist.save()
|
||||
|
||||
if new_nodes:
|
||||
for edge in new_edges:
|
||||
# All edge have `current` as educt
|
||||
# Use `current` and adjust probs
|
||||
current_prob = node_probs[current]
|
||||
|
||||
for prod in edge.products:
|
||||
# Either is a new product or a product and we found a path with a higher prob
|
||||
if (
|
||||
prod not in node_probs
|
||||
or current_prob * edge.probability > node_probs[prod]
|
||||
):
|
||||
node_probs[prod] = current_prob * edge.probability
|
||||
|
||||
# Update Queue to proceed
|
||||
if self.prediction_setting.expansion_scheme == "DFS":
|
||||
for n in new_nodes:
|
||||
if n not in processed:
|
||||
# We want to follow this path -> prepend queue
|
||||
queue.insert(0, n)
|
||||
elif self.prediction_setting.expansion_scheme == "BFS":
|
||||
for n in new_nodes:
|
||||
if n not in processed:
|
||||
# Add at the end, everything queued before will be processed
|
||||
# before new_nodese
|
||||
queue.append(n)
|
||||
elif self.prediction_setting.expansion_scheme == "GREEDY":
|
||||
# Simply add them, as we will re-order the queue later
|
||||
for n in new_nodes:
|
||||
if n not in processed:
|
||||
queue.append(n)
|
||||
|
||||
node_and_probs = []
|
||||
for queued_val in queue:
|
||||
node_and_probs.append((queued_val, node_probs[queued_val]))
|
||||
|
||||
# re-order the queue and only pick smiles
|
||||
queue = [
|
||||
n[0] for n in sorted(node_and_probs, key=lambda x: x[1], reverse=True)
|
||||
]
|
||||
else:
|
||||
raise ValueError(
|
||||
f"Unknown expansion schema: {self.prediction_setting.expansion_scheme}"
|
||||
)
|
||||
|
||||
# Queue exhausted, we're done
|
||||
self.done = True
|
||||
|
||||
def predict_step(self, from_depth: int = None, from_node: "Node" = None):
|
||||
substrates: List[SNode] = []
|
||||
|
||||
@ -1547,67 +1759,15 @@ class SPathway(object):
|
||||
if from_node == v:
|
||||
substrates = [k]
|
||||
break
|
||||
else:
|
||||
raise ValueError(f"Node {from_node} not found in SPathway!")
|
||||
else:
|
||||
raise ValueError("Neither from_depth nor from_node_url specified")
|
||||
|
||||
new_tp = False
|
||||
if substrates:
|
||||
for sub in substrates:
|
||||
if sub.app_domain_assessment is None:
|
||||
if self.prediction_setting.model:
|
||||
if self.prediction_setting.model.app_domain:
|
||||
app_domain_assessment = self.prediction_setting.model.app_domain.assess(
|
||||
sub.smiles
|
||||
)
|
||||
|
||||
if self.persist is not None:
|
||||
n = self.snode_persist_lookup[sub]
|
||||
|
||||
assert n.id is not None, (
|
||||
"Node has no id! Should have been saved already... aborting!"
|
||||
)
|
||||
node_data = n.simple_json()
|
||||
node_data["image"] = f"{n.url}?image=svg"
|
||||
app_domain_assessment["assessment"]["node"] = node_data
|
||||
|
||||
n.kv["app_domain_assessment"] = app_domain_assessment
|
||||
n.save()
|
||||
|
||||
sub.app_domain_assessment = app_domain_assessment
|
||||
|
||||
candidates = self.prediction_setting.expand(self, sub)
|
||||
# candidates is a List of PredictionResult. The length of the List is equal to the number of rules
|
||||
for cand_set in candidates:
|
||||
if cand_set:
|
||||
new_tp = True
|
||||
# cand_set is a PredictionResult object that can consist of multiple candidate reactions
|
||||
for cand in cand_set:
|
||||
cand_nodes = []
|
||||
# candidate reactions can have multiple fragments
|
||||
for c in cand:
|
||||
if c not in self.smiles_to_node:
|
||||
# For new nodes do an AppDomain Assessment if an AppDomain is attached
|
||||
app_domain_assessment = None
|
||||
if self.prediction_setting.model:
|
||||
if self.prediction_setting.model.app_domain:
|
||||
app_domain_assessment = (
|
||||
self.prediction_setting.model.app_domain.assess(c)
|
||||
)
|
||||
|
||||
self.smiles_to_node[c] = SNode(
|
||||
c, sub.depth + 1, app_domain_assessment
|
||||
)
|
||||
|
||||
node = self.smiles_to_node[c]
|
||||
cand_nodes.append(node)
|
||||
|
||||
edge = SEdge(
|
||||
sub,
|
||||
cand_nodes,
|
||||
rule=cand_set.rule,
|
||||
probability=cand_set.probability,
|
||||
)
|
||||
self.edges.add(edge)
|
||||
new_nodes, _ = self._expand(substrates)
|
||||
new_tp = len(new_nodes) > 0
|
||||
|
||||
# In case no substrates are found, we're done.
|
||||
# For "predict from node" we're always done
|
||||
@ -1620,6 +1780,14 @@ class SPathway(object):
|
||||
# call save to update the internal modified field
|
||||
self.persist.save()
|
||||
|
||||
def get_edge_for_educt_smiles(self, smiles: str) -> List[SEdge]:
|
||||
res = []
|
||||
for e in self.edges:
|
||||
for n in e.educts:
|
||||
if n.smiles == smiles:
|
||||
res.append(e)
|
||||
return res
|
||||
|
||||
def _sync_to_pathway(self) -> None:
|
||||
logger.info("Updating Pathway with SPathway")
|
||||
|
||||
@ -1683,11 +1851,6 @@ class SPathway(object):
|
||||
"to": to_indices,
|
||||
}
|
||||
|
||||
# if edge.rule:
|
||||
# e['rule'] = {
|
||||
# 'name': edge.rule.name,
|
||||
# 'id': edge.rule.url,
|
||||
# }
|
||||
edges.append(e)
|
||||
|
||||
return {
|
||||
|
||||
@ -8,14 +8,19 @@ from epdb.logic import UserManager, GroupManager, PackageManager, SettingManager
|
||||
from epdb.models import (
|
||||
UserSettingPermission,
|
||||
MLRelativeReasoning,
|
||||
EnviFormer,
|
||||
Permission,
|
||||
User,
|
||||
ExternalDatabase,
|
||||
License,
|
||||
)
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument(
|
||||
"-ol", "--only-licenses", action="store_true", help="Only create licenses."
|
||||
)
|
||||
|
||||
def create_users(self):
|
||||
# Anonymous User
|
||||
if not User.objects.filter(email="anon@envipath.com").exists():
|
||||
@ -83,6 +88,17 @@ class Command(BaseCommand):
|
||||
|
||||
return anon, admin, g, user0
|
||||
|
||||
def create_licenses(self):
|
||||
"""Create the six default licenses supported by enviPath"""
|
||||
cc_strings = ["by", "by-nc", "by-nc-nd", "by-nc-sa", "by-nd", "by-sa"]
|
||||
for cc_string in cc_strings:
|
||||
if not License.objects.filter(cc_string=cc_string).exists():
|
||||
new_license = License()
|
||||
new_license.cc_string = cc_string
|
||||
new_license.link = f"https://creativecommons.org/licenses/{cc_string}/4.0/"
|
||||
new_license.image_link = f"https://licensebuttons.net/l/{cc_string}/4.0/88x31.png"
|
||||
new_license.save()
|
||||
|
||||
def import_package(self, data, owner):
|
||||
return PackageManager.import_legacy_package(
|
||||
data, owner, keep_ids=True, add_import_timestamp=False, trust_reviewed=True
|
||||
@ -157,6 +173,10 @@ class Command(BaseCommand):
|
||||
|
||||
@transaction.atomic
|
||||
def handle(self, *args, **options):
|
||||
# Create licenses
|
||||
self.create_licenses()
|
||||
if options.get("only_licenses", False):
|
||||
return
|
||||
# Create users
|
||||
anon, admin, g, user0 = self.create_users()
|
||||
|
||||
@ -210,7 +230,6 @@ class Command(BaseCommand):
|
||||
package=pack,
|
||||
rule_packages=[mapping["EAWAG-BBD"]],
|
||||
data_packages=[mapping["EAWAG-BBD"]],
|
||||
eval_packages=[],
|
||||
threshold=0.5,
|
||||
name="ECC - BBD - T0.5",
|
||||
description="ML Relative Reasoning",
|
||||
@ -218,7 +237,3 @@ class Command(BaseCommand):
|
||||
|
||||
ml_model.build_dataset()
|
||||
ml_model.build_model()
|
||||
|
||||
# If available, create EnviFormerModel
|
||||
if s.ENVIFORMER_PRESENT:
|
||||
EnviFormer.create(pack, "EnviFormer - T0.5", "EnviFormer Model with Threshold 0.5", 0.5)
|
||||
|
||||
92
epdb/management/commands/create_api_token.py
Normal file
@ -0,0 +1,92 @@
|
||||
from django.conf import settings as s
|
||||
from django.contrib.auth import get_user_model
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
|
||||
from epdb.models import APIToken
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = "Create an API token for a user"
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument(
|
||||
"--username",
|
||||
required=True,
|
||||
help="Username of the user who will own the token",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--name",
|
||||
required=True,
|
||||
help="Descriptive name for the token",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--expires-days",
|
||||
type=int,
|
||||
default=90,
|
||||
help="Days until expiration (0 for no expiration)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--inactive",
|
||||
action="store_true",
|
||||
help="Create the token as inactive",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--curl",
|
||||
action="store_true",
|
||||
help="Print a curl example using the token",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--base-url",
|
||||
default=None,
|
||||
help="Base URL for curl example (default SERVER_URL or http://localhost:8000)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--endpoint",
|
||||
default="/api/v1/compounds/",
|
||||
help="Endpoint path for curl example",
|
||||
)
|
||||
|
||||
def handle(self, *args, **options):
|
||||
username = options["username"]
|
||||
name = options["name"]
|
||||
expires_days = options["expires_days"]
|
||||
|
||||
if expires_days < 0:
|
||||
raise CommandError("--expires-days must be >= 0")
|
||||
|
||||
if expires_days == 0:
|
||||
expires_days = None
|
||||
|
||||
user_model = get_user_model()
|
||||
try:
|
||||
user = user_model.objects.get(username=username)
|
||||
except user_model.DoesNotExist as exc:
|
||||
raise CommandError(f"User not found for username '{username}'") from exc
|
||||
|
||||
token, raw_token = APIToken.create_token(user, name=name, expires_days=expires_days)
|
||||
|
||||
if options["inactive"]:
|
||||
token.is_active = False
|
||||
token.save(update_fields=["is_active"])
|
||||
|
||||
self.stdout.write(f"User: {user.username} ({user.email})")
|
||||
self.stdout.write(f"Token name: {token.name}")
|
||||
self.stdout.write(f"Token id: {token.id}")
|
||||
if token.expires_at:
|
||||
self.stdout.write(f"Expires at: {token.expires_at.isoformat()}")
|
||||
else:
|
||||
self.stdout.write("Expires at: never")
|
||||
self.stdout.write(f"Active: {token.is_active}")
|
||||
self.stdout.write("Raw token:")
|
||||
self.stdout.write(raw_token)
|
||||
|
||||
if options["curl"]:
|
||||
base_url = (
|
||||
options["base_url"] or getattr(s, "SERVER_URL", None) or "http://localhost:8000"
|
||||
)
|
||||
endpoint = options["endpoint"]
|
||||
endpoint = endpoint if endpoint.startswith("/") else f"/{endpoint}"
|
||||
url = f"{base_url.rstrip('/')}{endpoint}"
|
||||
curl_cmd = f'curl -H "Authorization: Bearer {raw_token}" "{url}"'
|
||||
self.stdout.write("Curl:")
|
||||
self.stdout.write(curl_cmd)
|
||||
@ -2,7 +2,9 @@ from django.conf import settings as s
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.db import transaction
|
||||
|
||||
from epdb.models import MLRelativeReasoning, EnviFormer, Package
|
||||
from epdb.models import EnviFormer, MLRelativeReasoning
|
||||
|
||||
Package = s.GET_PACKAGE_MODEL()
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
@ -75,11 +77,13 @@ class Command(BaseCommand):
|
||||
return packages
|
||||
|
||||
# Iteratively create models in options["model_names"]
|
||||
print(f"Creating models: {options['model_names']}\n"
|
||||
f"Data packages: {options['data_packages']}\n"
|
||||
f"Rule Packages (only for MLRR): {options['rule_packages']}\n"
|
||||
f"Eval Packages: {options['eval_packages']}\n"
|
||||
f"Threshold: {options['threshold']:.2f}")
|
||||
print(
|
||||
f"Creating models: {options['model_names']}\n"
|
||||
f"Data packages: {options['data_packages']}\n"
|
||||
f"Rule Packages (only for MLRR): {options['rule_packages']}\n"
|
||||
f"Eval Packages: {options['eval_packages']}\n"
|
||||
f"Threshold: {options['threshold']:.2f}"
|
||||
)
|
||||
data_packages = decode_packages(options["data_packages"])
|
||||
eval_packages = decode_packages(options["eval_packages"])
|
||||
rule_packages = decode_packages(options["rule_packages"])
|
||||
@ -89,22 +93,20 @@ class Command(BaseCommand):
|
||||
model = EnviFormer.create(
|
||||
pack,
|
||||
data_packages=data_packages,
|
||||
eval_packages=eval_packages,
|
||||
threshold=options['threshold'],
|
||||
threshold=options["threshold"],
|
||||
name=f"EnviFormer - {', '.join(options['data_packages'])} - T{options['threshold']:.2f}",
|
||||
description=f"EnviFormer transformer trained on {options['data_packages']} "
|
||||
f"evaluated on {options['eval_packages']}.",
|
||||
f"evaluated on {options['eval_packages']}.",
|
||||
)
|
||||
elif model_name == "mlrr":
|
||||
model = MLRelativeReasoning.create(
|
||||
package=pack,
|
||||
rule_packages=rule_packages,
|
||||
data_packages=data_packages,
|
||||
eval_packages=eval_packages,
|
||||
threshold=options['threshold'],
|
||||
threshold=options["threshold"],
|
||||
name=f"ECC - {', '.join(options['data_packages'])} - T{options['threshold']:.2f}",
|
||||
description=f"ML Relative Reasoning trained on {options['data_packages']} with rules from "
|
||||
f"{options['rule_packages']} and evaluated on {options['eval_packages']}.",
|
||||
f"{options['rule_packages']} and evaluated on {options['eval_packages']}.",
|
||||
)
|
||||
else:
|
||||
raise ValueError(f"Cannot create model of type {model_name}, unknown model type")
|
||||
|
||||
@ -47,7 +47,7 @@ class Command(BaseCommand):
|
||||
"description": model.description,
|
||||
"kv": model.kv,
|
||||
"data_packages_uuids": [str(p.uuid) for p in model.data_packages.all()],
|
||||
"eval_packages_uuids": [str(p.uuid) for p in model.data_packages.all()],
|
||||
"eval_packages_uuids": [str(p.uuid) for p in model.eval_packages.all()],
|
||||
"threshold": model.threshold,
|
||||
"eval_results": model.eval_results,
|
||||
"multigen_eval": model.multigen_eval,
|
||||
|
||||
@ -8,7 +8,9 @@ from django.conf import settings as s
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.db import transaction
|
||||
|
||||
from epdb.models import EnviFormer, Package
|
||||
from epdb.models import EnviFormer
|
||||
|
||||
Package = s.GET_PACKAGE_MODEL()
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
|
||||
@ -1,8 +1,8 @@
|
||||
from django.apps import apps
|
||||
from django.conf import settings as s
|
||||
from django.core.management.base import BaseCommand
|
||||
|
||||
from django.db.models import F, Value, TextField, JSONField
|
||||
from django.db.models.functions import Replace, Cast
|
||||
from django.db.models import F, JSONField, TextField, Value
|
||||
from django.db.models.functions import Cast, Replace
|
||||
|
||||
from epdb.models import EnviPathModel
|
||||
|
||||
@ -23,10 +23,12 @@ class Command(BaseCommand):
|
||||
)
|
||||
|
||||
def handle(self, *args, **options):
|
||||
Package = s.GET_PACKAGE_MODEL()
|
||||
Package.objects.update(url=Replace(F("url"), Value(options["old"]), Value(options["new"])))
|
||||
|
||||
MODELS = [
|
||||
"User",
|
||||
"Group",
|
||||
"Package",
|
||||
"Compound",
|
||||
"CompoundStructure",
|
||||
"Pathway",
|
||||
@ -47,7 +49,6 @@ class Command(BaseCommand):
|
||||
]
|
||||
for model in MODELS:
|
||||
obj_cls = apps.get_model("epdb", model)
|
||||
print(f"Localizing urls for {model}")
|
||||
obj_cls.objects.update(
|
||||
url=Replace(F("url"), Value(options["old"]), Value(options["new"]))
|
||||
)
|
||||
|
||||
18
epdb/migrations/0010_license_cc_string.py
Normal file
@ -0,0 +1,18 @@
|
||||
# Generated by Django 5.2.7 on 2025-11-11 14:11
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
("epdb", "0009_joblog"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name="license",
|
||||
name="cc_string",
|
||||
field=models.TextField(default="by-nc-sa", verbose_name="CC string"),
|
||||
preserve_default=False,
|
||||
),
|
||||
]
|
||||
59
epdb/migrations/0011_auto_20251111_1413.py
Normal file
@ -0,0 +1,59 @@
|
||||
# Generated by Django 5.2.7 on 2025-11-11 14:13
|
||||
|
||||
import re
|
||||
|
||||
from django.contrib.postgres.aggregates import ArrayAgg
|
||||
from django.db import migrations
|
||||
from django.db.models import Min
|
||||
|
||||
|
||||
def set_cc(apps, schema_editor):
|
||||
License = apps.get_model("epdb", "License")
|
||||
|
||||
# For all existing licenses extract cc_string from link
|
||||
for license in License.objects.all():
|
||||
pattern = r"/licenses/([^/]+)/4\.0"
|
||||
match = re.search(pattern, license.link)
|
||||
if match:
|
||||
license.cc_string = match.group(1)
|
||||
license.save()
|
||||
else:
|
||||
raise ValueError(f"Could not find license for {license.link}")
|
||||
|
||||
# Ensure we have all licenses
|
||||
cc_strings = ["by", "by-nc", "by-nc-nd", "by-nc-sa", "by-nd", "by-sa"]
|
||||
for cc_string in cc_strings:
|
||||
if not License.objects.filter(cc_string=cc_string).exists():
|
||||
new_license = License()
|
||||
new_license.cc_string = cc_string
|
||||
new_license.link = f"https://creativecommons.org/licenses/{cc_string}/4.0/"
|
||||
new_license.image_link = f"https://licensebuttons.net/l/{cc_string}/4.0/88x31.png"
|
||||
new_license.save()
|
||||
|
||||
# As we might have existing Licenses representing the same License,
|
||||
# get min pk and all pks as a list
|
||||
license_lookup_qs = License.objects.values("cc_string").annotate(
|
||||
lowest_pk=Min("id"), all_pks=ArrayAgg("id", order_by=("id",))
|
||||
)
|
||||
|
||||
license_lookup = {
|
||||
row["cc_string"]: (row["lowest_pk"], row["all_pks"]) for row in license_lookup_qs
|
||||
}
|
||||
|
||||
Packages = apps.get_model("epdb", "Package")
|
||||
|
||||
for k, v in license_lookup.items():
|
||||
# Set min pk to all packages pointing to any of the duplicates
|
||||
Packages.objects.filter(pk__in=v[1]).update(license_id=v[0])
|
||||
# remove the min pk from "other" pks as we use them for deletion
|
||||
v[1].remove(v[0])
|
||||
# Delete redundant License objects
|
||||
License.objects.filter(pk__in=v[1]).delete()
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
("epdb", "0010_license_cc_string"),
|
||||
]
|
||||
|
||||
operations = [migrations.RunPython(set_cc)]
|
||||
@ -0,0 +1,22 @@
|
||||
# Generated by Django 5.2.7 on 2025-12-02 13:09
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
("epdb", "0011_auto_20251111_1413"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name="node",
|
||||
name="stereo_removed",
|
||||
field=models.BooleanField(default=False),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name="pathway",
|
||||
name="predicted",
|
||||
field=models.BooleanField(default=False),
|
||||
),
|
||||
]
|
||||
25
epdb/migrations/0013_setting_expansion_schema.py
Normal file
@ -0,0 +1,25 @@
|
||||
# Generated by Django 5.2.7 on 2025-12-14 11:30
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
("epdb", "0012_node_stereo_removed_pathway_predicted"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name="setting",
|
||||
name="expansion_schema",
|
||||
field=models.CharField(
|
||||
choices=[
|
||||
("BFS", "Breadth First Search"),
|
||||
("DFS", "Depth First Search"),
|
||||
("GREEDY", "Greedy"),
|
||||
],
|
||||
default="BFS",
|
||||
max_length=20,
|
||||
),
|
||||
),
|
||||
]
|
||||
@ -0,0 +1,17 @@
|
||||
# Generated by Django 5.2.7 on 2025-12-14 16:02
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
("epdb", "0013_setting_expansion_schema"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RenameField(
|
||||
model_name="setting",
|
||||
old_name="expansion_schema",
|
||||
new_name="expansion_scheme",
|
||||
),
|
||||
]
|
||||
17
epdb/migrations/0015_user_is_reviewer.py
Normal file
@ -0,0 +1,17 @@
|
||||
# Generated by Django 5.2.7 on 2026-01-19 19:26
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
("epdb", "0014_rename_expansion_schema_setting_expansion_scheme"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name="user",
|
||||
name="is_reviewer",
|
||||
field=models.BooleanField(default=False),
|
||||
),
|
||||
]
|
||||
873
epdb/models.py
199
epdb/tasks.py
@ -1,19 +1,24 @@
|
||||
import csv
|
||||
import io
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from typing import Any, Callable, List, Optional
|
||||
from uuid import uuid4
|
||||
|
||||
from celery import shared_task
|
||||
from celery.utils.functional import LRUCache
|
||||
from django.conf import settings as s
|
||||
from django.core.mail import EmailMultiAlternatives
|
||||
from django.utils import timezone
|
||||
|
||||
from epdb.logic import SPathway
|
||||
from epdb.models import EPModel, JobLog, Node, Package, Pathway, Rule, Setting, User, Edge
|
||||
from epdb.models import Edge, EPModel, JobLog, Node, Pathway, Rule, Setting, User
|
||||
from utilities.chem import FormatConverter
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
ML_CACHE = LRUCache(3) # Cache the three most recent ML models to reduce load times.
|
||||
|
||||
Package = s.GET_PACKAGE_MODEL()
|
||||
|
||||
|
||||
def get_ml_model(model_pk: int):
|
||||
if model_pk not in ML_CACHE:
|
||||
@ -29,11 +34,11 @@ def dispatch_eager(user: "User", job: Callable, *args, **kwargs):
|
||||
log.task_id = uuid4()
|
||||
log.job_name = job.__name__
|
||||
log.status = "SUCCESS"
|
||||
log.done_at = datetime.now()
|
||||
log.done_at = timezone.now()
|
||||
log.task_result = str(x) if x else None
|
||||
log.save()
|
||||
|
||||
return x
|
||||
return log, x
|
||||
except Exception as e:
|
||||
logger.exception(e)
|
||||
raise e
|
||||
@ -49,7 +54,7 @@ def dispatch(user: "User", job: Callable, *args, **kwargs):
|
||||
log.status = "INITIAL"
|
||||
log.save()
|
||||
|
||||
return x.result
|
||||
return log
|
||||
except Exception as e:
|
||||
logger.exception(e)
|
||||
raise e
|
||||
@ -69,7 +74,31 @@ def predict_simple(model_pk: int, smiles: str):
|
||||
|
||||
@shared_task(queue="background")
|
||||
def send_registration_mail(user_pk: int):
|
||||
pass
|
||||
u = User.objects.get(id=user_pk)
|
||||
|
||||
tpl = """Welcome {username}!,
|
||||
|
||||
Thank you for your interest in enviPath.
|
||||
|
||||
The public system is intended for non-commercial use only.
|
||||
We will review your account details and usually activate your account within 24 hours.
|
||||
Once activated, you will be notified by email.
|
||||
|
||||
If we have any questions, we will contact you at this email address.
|
||||
|
||||
Best regards,
|
||||
|
||||
enviPath team"""
|
||||
|
||||
msg = EmailMultiAlternatives(
|
||||
"Your enviPath account",
|
||||
tpl.format(username=u.username),
|
||||
"admin@envipath.org",
|
||||
[u.email],
|
||||
bcc=["admin@envipath.org"],
|
||||
)
|
||||
|
||||
msg.send(fail_silently=False)
|
||||
|
||||
|
||||
@shared_task(bind=True, queue="model")
|
||||
@ -136,14 +165,25 @@ def predict(
|
||||
pred_setting_pk: int,
|
||||
limit: Optional[int] = None,
|
||||
node_pk: Optional[int] = None,
|
||||
setting_overrides: Optional[dict] = None,
|
||||
) -> Pathway:
|
||||
pw = Pathway.objects.get(id=pw_pk)
|
||||
setting = Setting.objects.get(id=pred_setting_pk)
|
||||
|
||||
if setting_overrides:
|
||||
for k, v in setting_overrides.items():
|
||||
setattr(setting, k, v)
|
||||
|
||||
# If the setting has a model add/restore it from the cache
|
||||
if setting.model is not None:
|
||||
setting.model = get_ml_model(setting.model.pk)
|
||||
|
||||
pw.kv.update(**{"status": "running"})
|
||||
kv = {"status": "running"}
|
||||
|
||||
if setting_overrides:
|
||||
kv["setting_overrides"] = setting_overrides
|
||||
|
||||
pw.kv.update(**kv)
|
||||
pw.save()
|
||||
|
||||
if JobLog.objects.filter(task_id=self.request.id).exists():
|
||||
@ -168,10 +208,12 @@ def predict(
|
||||
spw = SPathway.from_pathway(pw)
|
||||
spw.predict_step(from_node=n)
|
||||
else:
|
||||
raise ValueError("Neither limit nor node_pk given!")
|
||||
spw = SPathway(prediction_setting=setting, persist=pw)
|
||||
spw.predict()
|
||||
|
||||
except Exception as e:
|
||||
pw.kv.update({"status": "failed"})
|
||||
pw.kv.update(**{"error": str(e)})
|
||||
pw.save()
|
||||
|
||||
if JobLog.objects.filter(task_id=self.request.id).exists():
|
||||
@ -281,3 +323,144 @@ def identify_missing_rules(
|
||||
buffer.seek(0)
|
||||
|
||||
return buffer.getvalue()
|
||||
|
||||
|
||||
@shared_task(bind=True, queue="background")
|
||||
def engineer_pathways(self, pw_pks: List[int], setting_pk: int, target_package_pk: int):
|
||||
from utilities.misc import PathwayUtils
|
||||
|
||||
setting = Setting.objects.get(pk=setting_pk)
|
||||
# Temporarily set model_threshold to 0.0 to keep all tps
|
||||
setting.model_threshold = 0.0
|
||||
|
||||
target = Package.objects.get(pk=target_package_pk)
|
||||
|
||||
intermediate_pathways = []
|
||||
predicted_pathways = []
|
||||
|
||||
for pw in Pathway.objects.filter(pk__in=pw_pks):
|
||||
pu = PathwayUtils(pw)
|
||||
|
||||
eng_pw, node_to_snode_mapping, intermediates = pu.engineer(setting)
|
||||
|
||||
# If we've found intermediates, do the following
|
||||
# - Get a copy of the original pathway and add intermediates
|
||||
# - Store the predicted pathway for further investigation
|
||||
if len(intermediates):
|
||||
copy_mapping = {}
|
||||
copied_pw = pw.copy(target, copy_mapping)
|
||||
copied_pw.name = f"{copied_pw.name} (Engineered)"
|
||||
copied_pw.description = f"The original Pathway can be found here: {pw.url}"
|
||||
copied_pw.save()
|
||||
|
||||
for inter in intermediates:
|
||||
start = copy_mapping[inter[0]]
|
||||
end = copy_mapping[inter[1]]
|
||||
start_snode = inter[2]
|
||||
end_snode = inter[3]
|
||||
for idx, intermediate_edge in enumerate(inter[4]):
|
||||
smiles_to_node = {}
|
||||
|
||||
snodes_to_create = list(
|
||||
set(intermediate_edge.educts + intermediate_edge.products)
|
||||
)
|
||||
|
||||
for snode in snodes_to_create:
|
||||
if snode == start_snode or snode == end_snode:
|
||||
smiles_to_node[snode.smiles] = start if snode == start_snode else end
|
||||
continue
|
||||
|
||||
if snode.smiles not in smiles_to_node:
|
||||
n = Node.create(copied_pw, smiles=snode.smiles, depth=snode.depth)
|
||||
# Used in viz to highlight intermediates
|
||||
n.kv.update({"is_engineered_intermediate": True})
|
||||
n.save()
|
||||
smiles_to_node[snode.smiles] = n
|
||||
|
||||
Edge.create(
|
||||
copied_pw,
|
||||
[smiles_to_node[educt.smiles] for educt in intermediate_edge.educts],
|
||||
[smiles_to_node[product.smiles] for product in intermediate_edge.products],
|
||||
rule=intermediate_edge.rule,
|
||||
)
|
||||
|
||||
# Persist the predicted pathway
|
||||
pred_pw = pu.spathway_to_pathway(target, eng_pw, name=f"{pw.name} (Predicted)")
|
||||
|
||||
intermediate_pathways.append(copied_pw.url)
|
||||
predicted_pathways.append(pred_pw.url)
|
||||
|
||||
return intermediate_pathways, predicted_pathways
|
||||
|
||||
|
||||
@shared_task(bind=True, queue="background")
|
||||
def batch_predict(
|
||||
self,
|
||||
substrates: List[str] | List[List[str]],
|
||||
prediction_setting_pk: int,
|
||||
target_package_pk: int,
|
||||
num_tps: int = 50,
|
||||
):
|
||||
target_package = Package.objects.get(pk=target_package_pk)
|
||||
prediction_setting = Setting.objects.get(pk=prediction_setting_pk)
|
||||
|
||||
if len(substrates) == 0:
|
||||
raise ValueError("No substrates given!")
|
||||
|
||||
is_pair = isinstance(substrates[0], list)
|
||||
|
||||
substrate_and_names = []
|
||||
if not is_pair:
|
||||
for sub in substrates:
|
||||
substrate_and_names.append([sub, None])
|
||||
else:
|
||||
substrate_and_names = substrates
|
||||
|
||||
# Check prerequisite that we can standardize all substrates
|
||||
standardized_substrates_and_smiles = []
|
||||
for substrate in substrate_and_names:
|
||||
try:
|
||||
stand_smiles = FormatConverter.standardize(substrate[0])
|
||||
standardized_substrates_and_smiles.append([stand_smiles, substrate[1]])
|
||||
except ValueError:
|
||||
raise ValueError(
|
||||
f'Pathway prediction failed as standardization of SMILES "{substrate}" failed!'
|
||||
)
|
||||
|
||||
pathways = []
|
||||
|
||||
for pair in standardized_substrates_and_smiles:
|
||||
pw = Pathway.create(
|
||||
target_package,
|
||||
pair[0],
|
||||
name=pair[1],
|
||||
predicted=True,
|
||||
)
|
||||
|
||||
# set mode and setting
|
||||
pw.setting = prediction_setting
|
||||
pw.kv.update({"mode": "predict"})
|
||||
pw.save()
|
||||
|
||||
predict(
|
||||
pw.pk,
|
||||
prediction_setting.pk,
|
||||
limit=None,
|
||||
setting_overrides={
|
||||
"max_nodes": num_tps,
|
||||
"max_depth": num_tps,
|
||||
"model_threshold": 0.001,
|
||||
},
|
||||
)
|
||||
|
||||
pathways.append(pw)
|
||||
|
||||
buffer = io.StringIO()
|
||||
|
||||
for idx, pw in enumerate(pathways):
|
||||
# Carry out header only for the first pathway
|
||||
buffer.write(pw.to_csv(include_header=idx == 0, include_pathway_url=True))
|
||||
|
||||
buffer.seek(0)
|
||||
|
||||
return buffer.getvalue()
|
||||
|
||||
18
epdb/urls.py
@ -48,6 +48,8 @@ urlpatterns = [
|
||||
re_path(r"^user$", v.users, name="users"),
|
||||
re_path(r"^group$", v.groups, name="groups"),
|
||||
re_path(r"^search$", v.search, name="search"),
|
||||
re_path(r"^predict$", v.predict_pathway, name="predict_pathway"),
|
||||
re_path(r"^batch-predict$", v.batch_predict_pathway, name="batch_predict_pathway"),
|
||||
# User Detail
|
||||
re_path(rf"^user/(?P<user_uuid>{UUID})", v.user, name="user"),
|
||||
# Group Detail
|
||||
@ -141,6 +143,11 @@ urlpatterns = [
|
||||
v.package_pathway,
|
||||
name="package pathway detail",
|
||||
),
|
||||
re_path(
|
||||
rf"^package/(?P<package_uuid>{UUID})/predict$",
|
||||
v.package_predict_pathway,
|
||||
name="package predict pathway",
|
||||
),
|
||||
# Pathway Nodes
|
||||
re_path(
|
||||
rf"^package/(?P<package_uuid>{UUID})/pathway/(?P<pathway_uuid>{UUID})/node$",
|
||||
@ -190,7 +197,16 @@ urlpatterns = [
|
||||
re_path(r"^indigo/dearomatize$", v.dearomatize, name="indigo_dearomatize"),
|
||||
re_path(r"^indigo/layout$", v.layout, name="indigo_layout"),
|
||||
re_path(r"^depict$", v.depict, name="depict"),
|
||||
re_path(r"^jobs", v.jobs, name="jobs"),
|
||||
path("jobs", v.jobs, name="jobs"),
|
||||
path("jobs/<uuid:job_uuid>", v.job, name="job detail"),
|
||||
# OAuth Stuff
|
||||
path("o/userinfo/", v.userinfo, name="oauth_userinfo"),
|
||||
# Static Pages
|
||||
re_path(r"^terms$", v.static_terms_of_use, name="terms_of_use"),
|
||||
re_path(r"^privacy$", v.static_privacy_policy, name="privacy_policy"),
|
||||
re_path(r"^cookie-policy$", v.static_cookie_policy, name="cookie_policy"),
|
||||
re_path(r"^about$", v.static_about_us, name="about_us"),
|
||||
re_path(r"^contact$", v.static_contact_support, name="contact_support"),
|
||||
re_path(r"^careers$", v.static_careers, name="careers"),
|
||||
re_path(r"^cite$", v.static_cite, name="cite"),
|
||||
]
|
||||
|
||||
1096
epdb/views.py
@ -1,24 +1,21 @@
|
||||
import gzip
|
||||
import json
|
||||
import logging
|
||||
import os.path
|
||||
from datetime import datetime
|
||||
|
||||
from django.conf import settings as s
|
||||
from django.http import HttpResponseNotAllowed
|
||||
from django.shortcuts import render
|
||||
|
||||
from epdb.logic import PackageManager
|
||||
from epdb.models import Rule, SimpleAmbitRule, Package, CompoundStructure
|
||||
from epdb.views import get_base_context, _anonymous_or_real
|
||||
from utilities.chem import FormatConverter
|
||||
|
||||
|
||||
from rdkit import Chem
|
||||
from rdkit.Chem.MolStandardize import rdMolStandardize
|
||||
|
||||
from epdb.models import CompoundStructure, Rule, SimpleAmbitRule
|
||||
from epdb.views import get_base_context
|
||||
from utilities.chem import FormatConverter
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
Package = s.GET_PACKAGE_MODEL()
|
||||
|
||||
|
||||
def normalize_smiles(smiles):
|
||||
m1 = Chem.MolFromSmiles(smiles)
|
||||
@ -59,9 +56,7 @@ def run_both_engines(SMILES, SMIRKS):
|
||||
set(
|
||||
[
|
||||
normalize_smiles(str(x))
|
||||
for x in FormatConverter.sanitize_smiles(
|
||||
[str(s) for s in all_rdkit_prods]
|
||||
)[0]
|
||||
for x in FormatConverter.sanitize_smiles([str(s) for s in all_rdkit_prods])[0]
|
||||
]
|
||||
)
|
||||
)
|
||||
@ -81,12 +76,9 @@ def migration(request):
|
||||
open(s.BASE_DIR / "fixtures" / "migration_status_per_rule.json")
|
||||
)
|
||||
else:
|
||||
BBD = Package.objects.get(
|
||||
url="http://localhost:8000/package/32de3cf4-e3e6-4168-956e-32fa5ddb0ce1"
|
||||
)
|
||||
BBD = Package.objects.get(uuid="32de3cf4-e3e6-4168-956e-32fa5ddb0ce1")
|
||||
ALL_SMILES = [
|
||||
cs.smiles
|
||||
for cs in CompoundStructure.objects.filter(compound__package=BBD)
|
||||
cs.smiles for cs in CompoundStructure.objects.filter(compound__package=BBD)
|
||||
]
|
||||
RULES = SimpleAmbitRule.objects.filter(package=BBD)
|
||||
|
||||
@ -142,9 +134,7 @@ def migration(request):
|
||||
)
|
||||
|
||||
for r in migration_status["results"]:
|
||||
r["detail_url"] = r["detail_url"].replace(
|
||||
"http://localhost:8000", s.SERVER_URL
|
||||
)
|
||||
r["detail_url"] = r["detail_url"].replace("http://localhost:8000", s.SERVER_URL)
|
||||
|
||||
context.update(**migration_status)
|
||||
|
||||
@ -152,12 +142,10 @@ def migration(request):
|
||||
|
||||
|
||||
def migration_detail(request, package_uuid, rule_uuid):
|
||||
current_user = _anonymous_or_real(request)
|
||||
|
||||
if request.method == "GET":
|
||||
context = get_base_context(request)
|
||||
|
||||
BBD = Package.objects.get(name="EAWAG-BBD")
|
||||
BBD = Package.objects.get(uuid="32de3cf4-e3e6-4168-956e-32fa5ddb0ce1")
|
||||
STRUCTURES = CompoundStructure.objects.filter(compound__package=BBD)
|
||||
rule = Rule.objects.get(package=BBD, uuid=rule_uuid)
|
||||
|
||||
@ -235,9 +223,7 @@ def compare(request):
|
||||
context["smirks"] = (
|
||||
"[#1,#6:6][#7;X3;!$(NC1CC1)!$([N][C]=O)!$([!#8]CNC=O):1]([#1,#6:7])[#6;A;X4:2][H:3]>>[#1,#6:6][#7;X3:1]([#1,#6:7])[H:3].[#6;A:2]=O"
|
||||
)
|
||||
context["smiles"] = (
|
||||
"C(CC(=O)N[C@@H](CS[Se-])C(=O)NCC(=O)[O-])[C@@H](C(=O)[O-])N"
|
||||
)
|
||||
context["smiles"] = "C(CC(=O)N[C@@H](CS[Se-])C(=O)NCC(=O)[O-])[C@@H](C(=O)[O-])N"
|
||||
return render(request, "compare.html", context)
|
||||
|
||||
elif request.method == "POST":
|
||||
|
||||
25
package.json
Normal file
@ -0,0 +1,25 @@
|
||||
{
|
||||
"name": "envipy",
|
||||
"version": "1.0.0",
|
||||
"private": true,
|
||||
"description": "enviPath UI - Tailwind CSS + DaisyUI",
|
||||
"scripts": {
|
||||
"dev": "tailwindcss -i static/css/input.css -o static/css/output.css --watch=always",
|
||||
"build": "tailwindcss -i static/css/input.css -o static/css/output.css --minify"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@tailwindcss/cli": "^4.1.18",
|
||||
"@tailwindcss/postcss": "^4.1.18",
|
||||
"daisyui": "^5.5.14",
|
||||
"postcss": "^8.5.6",
|
||||
"prettier": "^3.7.4",
|
||||
"prettier-plugin-jinja-template": "^2.1.0",
|
||||
"prettier-plugin-tailwindcss": "^0.7.2",
|
||||
"tailwindcss": "^4.1.18"
|
||||
},
|
||||
"keywords": [
|
||||
"django",
|
||||
"tailwindcss",
|
||||
"daisyui"
|
||||
]
|
||||
}
|
||||
744
pnpm-lock.yaml
generated
@ -1,4 +1,740 @@
|
||||
lockfileVersion: 6.0
|
||||
specifiers: {}
|
||||
dependencies: {}
|
||||
packages: {}
|
||||
lockfileVersion: '9.0'
|
||||
|
||||
settings:
|
||||
autoInstallPeers: true
|
||||
excludeLinksFromLockfile: false
|
||||
|
||||
importers:
|
||||
|
||||
.:
|
||||
devDependencies:
|
||||
'@tailwindcss/cli':
|
||||
specifier: ^4.1.18
|
||||
version: 4.1.18
|
||||
'@tailwindcss/postcss':
|
||||
specifier: ^4.1.18
|
||||
version: 4.1.18
|
||||
daisyui:
|
||||
specifier: ^5.5.14
|
||||
version: 5.5.14
|
||||
postcss:
|
||||
specifier: ^8.5.6
|
||||
version: 8.5.6
|
||||
prettier:
|
||||
specifier: ^3.7.4
|
||||
version: 3.7.4
|
||||
prettier-plugin-jinja-template:
|
||||
specifier: ^2.1.0
|
||||
version: 2.1.0(prettier@3.7.4)
|
||||
prettier-plugin-tailwindcss:
|
||||
specifier: ^0.7.2
|
||||
version: 0.7.2(prettier@3.7.4)
|
||||
tailwindcss:
|
||||
specifier: ^4.1.18
|
||||
version: 4.1.18
|
||||
|
||||
packages:
|
||||
|
||||
'@alloc/quick-lru@5.2.0':
|
||||
resolution: {integrity: sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==}
|
||||
engines: {node: '>=10'}
|
||||
|
||||
'@jridgewell/gen-mapping@0.3.13':
|
||||
resolution: {integrity: sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==}
|
||||
|
||||
'@jridgewell/remapping@2.3.5':
|
||||
resolution: {integrity: sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==}
|
||||
|
||||
'@jridgewell/resolve-uri@3.1.2':
|
||||
resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==}
|
||||
engines: {node: '>=6.0.0'}
|
||||
|
||||
'@jridgewell/sourcemap-codec@1.5.5':
|
||||
resolution: {integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==}
|
||||
|
||||
'@jridgewell/trace-mapping@0.3.31':
|
||||
resolution: {integrity: sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==}
|
||||
|
||||
'@parcel/watcher-android-arm64@2.5.1':
|
||||
resolution: {integrity: sha512-KF8+j9nNbUN8vzOFDpRMsaKBHZ/mcjEjMToVMJOhTozkDonQFFrRcfdLWn6yWKCmJKmdVxSgHiYvTCef4/qcBA==}
|
||||
engines: {node: '>= 10.0.0'}
|
||||
cpu: [arm64]
|
||||
os: [android]
|
||||
|
||||
'@parcel/watcher-darwin-arm64@2.5.1':
|
||||
resolution: {integrity: sha512-eAzPv5osDmZyBhou8PoF4i6RQXAfeKL9tjb3QzYuccXFMQU0ruIc/POh30ePnaOyD1UXdlKguHBmsTs53tVoPw==}
|
||||
engines: {node: '>= 10.0.0'}
|
||||
cpu: [arm64]
|
||||
os: [darwin]
|
||||
|
||||
'@parcel/watcher-darwin-x64@2.5.1':
|
||||
resolution: {integrity: sha512-1ZXDthrnNmwv10A0/3AJNZ9JGlzrF82i3gNQcWOzd7nJ8aj+ILyW1MTxVk35Db0u91oD5Nlk9MBiujMlwmeXZg==}
|
||||
engines: {node: '>= 10.0.0'}
|
||||
cpu: [x64]
|
||||
os: [darwin]
|
||||
|
||||
'@parcel/watcher-freebsd-x64@2.5.1':
|
||||
resolution: {integrity: sha512-SI4eljM7Flp9yPuKi8W0ird8TI/JK6CSxju3NojVI6BjHsTyK7zxA9urjVjEKJ5MBYC+bLmMcbAWlZ+rFkLpJQ==}
|
||||
engines: {node: '>= 10.0.0'}
|
||||
cpu: [x64]
|
||||
os: [freebsd]
|
||||
|
||||
'@parcel/watcher-linux-arm-glibc@2.5.1':
|
||||
resolution: {integrity: sha512-RCdZlEyTs8geyBkkcnPWvtXLY44BCeZKmGYRtSgtwwnHR4dxfHRG3gR99XdMEdQ7KeiDdasJwwvNSF5jKtDwdA==}
|
||||
engines: {node: '>= 10.0.0'}
|
||||
cpu: [arm]
|
||||
os: [linux]
|
||||
|
||||
'@parcel/watcher-linux-arm-musl@2.5.1':
|
||||
resolution: {integrity: sha512-6E+m/Mm1t1yhB8X412stiKFG3XykmgdIOqhjWj+VL8oHkKABfu/gjFj8DvLrYVHSBNC+/u5PeNrujiSQ1zwd1Q==}
|
||||
engines: {node: '>= 10.0.0'}
|
||||
cpu: [arm]
|
||||
os: [linux]
|
||||
|
||||
'@parcel/watcher-linux-arm64-glibc@2.5.1':
|
||||
resolution: {integrity: sha512-LrGp+f02yU3BN9A+DGuY3v3bmnFUggAITBGriZHUREfNEzZh/GO06FF5u2kx8x+GBEUYfyTGamol4j3m9ANe8w==}
|
||||
engines: {node: '>= 10.0.0'}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
|
||||
'@parcel/watcher-linux-arm64-musl@2.5.1':
|
||||
resolution: {integrity: sha512-cFOjABi92pMYRXS7AcQv9/M1YuKRw8SZniCDw0ssQb/noPkRzA+HBDkwmyOJYp5wXcsTrhxO0zq1U11cK9jsFg==}
|
||||
engines: {node: '>= 10.0.0'}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
|
||||
'@parcel/watcher-linux-x64-glibc@2.5.1':
|
||||
resolution: {integrity: sha512-GcESn8NZySmfwlTsIur+49yDqSny2IhPeZfXunQi48DMugKeZ7uy1FX83pO0X22sHntJ4Ub+9k34XQCX+oHt2A==}
|
||||
engines: {node: '>= 10.0.0'}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
|
||||
'@parcel/watcher-linux-x64-musl@2.5.1':
|
||||
resolution: {integrity: sha512-n0E2EQbatQ3bXhcH2D1XIAANAcTZkQICBPVaxMeaCVBtOpBZpWJuf7LwyWPSBDITb7In8mqQgJ7gH8CILCURXg==}
|
||||
engines: {node: '>= 10.0.0'}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
|
||||
'@parcel/watcher-win32-arm64@2.5.1':
|
||||
resolution: {integrity: sha512-RFzklRvmc3PkjKjry3hLF9wD7ppR4AKcWNzH7kXR7GUe0Igb3Nz8fyPwtZCSquGrhU5HhUNDr/mKBqj7tqA2Vw==}
|
||||
engines: {node: '>= 10.0.0'}
|
||||
cpu: [arm64]
|
||||
os: [win32]
|
||||
|
||||
'@parcel/watcher-win32-ia32@2.5.1':
|
||||
resolution: {integrity: sha512-c2KkcVN+NJmuA7CGlaGD1qJh1cLfDnQsHjE89E60vUEMlqduHGCdCLJCID5geFVM0dOtA3ZiIO8BoEQmzQVfpQ==}
|
||||
engines: {node: '>= 10.0.0'}
|
||||
cpu: [ia32]
|
||||
os: [win32]
|
||||
|
||||
'@parcel/watcher-win32-x64@2.5.1':
|
||||
resolution: {integrity: sha512-9lHBdJITeNR++EvSQVUcaZoWupyHfXe1jZvGZ06O/5MflPcuPLtEphScIBL+AiCWBO46tDSHzWyD0uDmmZqsgA==}
|
||||
engines: {node: '>= 10.0.0'}
|
||||
cpu: [x64]
|
||||
os: [win32]
|
||||
|
||||
'@parcel/watcher@2.5.1':
|
||||
resolution: {integrity: sha512-dfUnCxiN9H4ap84DvD2ubjw+3vUNpstxa0TneY/Paat8a3R4uQZDLSvWjmznAY/DoahqTHl9V46HF/Zs3F29pg==}
|
||||
engines: {node: '>= 10.0.0'}
|
||||
|
||||
'@tailwindcss/cli@4.1.18':
|
||||
resolution: {integrity: sha512-sMZ+lZbDyxwjD2E0L7oRUjJ01Ffjtme5OtjvvnC+cV4CEDcbqzbp25TCpxHj6kWLU9+DlqJOiNgSOgctC2aZmg==}
|
||||
hasBin: true
|
||||
|
||||
'@tailwindcss/node@4.1.18':
|
||||
resolution: {integrity: sha512-DoR7U1P7iYhw16qJ49fgXUlry1t4CpXeErJHnQ44JgTSKMaZUdf17cfn5mHchfJ4KRBZRFA/Coo+MUF5+gOaCQ==}
|
||||
|
||||
'@tailwindcss/oxide-android-arm64@4.1.18':
|
||||
resolution: {integrity: sha512-dJHz7+Ugr9U/diKJA0W6N/6/cjI+ZTAoxPf9Iz9BFRF2GzEX8IvXxFIi/dZBloVJX/MZGvRuFA9rqwdiIEZQ0Q==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [arm64]
|
||||
os: [android]
|
||||
|
||||
'@tailwindcss/oxide-darwin-arm64@4.1.18':
|
||||
resolution: {integrity: sha512-Gc2q4Qhs660bhjyBSKgq6BYvwDz4G+BuyJ5H1xfhmDR3D8HnHCmT/BSkvSL0vQLy/nkMLY20PQ2OoYMO15Jd0A==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [arm64]
|
||||
os: [darwin]
|
||||
|
||||
'@tailwindcss/oxide-darwin-x64@4.1.18':
|
||||
resolution: {integrity: sha512-FL5oxr2xQsFrc3X9o1fjHKBYBMD1QZNyc1Xzw/h5Qu4XnEBi3dZn96HcHm41c/euGV+GRiXFfh2hUCyKi/e+yw==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [x64]
|
||||
os: [darwin]
|
||||
|
||||
'@tailwindcss/oxide-freebsd-x64@4.1.18':
|
||||
resolution: {integrity: sha512-Fj+RHgu5bDodmV1dM9yAxlfJwkkWvLiRjbhuO2LEtwtlYlBgiAT4x/j5wQr1tC3SANAgD+0YcmWVrj8R9trVMA==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [x64]
|
||||
os: [freebsd]
|
||||
|
||||
'@tailwindcss/oxide-linux-arm-gnueabihf@4.1.18':
|
||||
resolution: {integrity: sha512-Fp+Wzk/Ws4dZn+LV2Nqx3IilnhH51YZoRaYHQsVq3RQvEl+71VGKFpkfHrLM/Li+kt5c0DJe/bHXK1eHgDmdiA==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [arm]
|
||||
os: [linux]
|
||||
|
||||
'@tailwindcss/oxide-linux-arm64-gnu@4.1.18':
|
||||
resolution: {integrity: sha512-S0n3jboLysNbh55Vrt7pk9wgpyTTPD0fdQeh7wQfMqLPM/Hrxi+dVsLsPrycQjGKEQk85Kgbx+6+QnYNiHalnw==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
|
||||
'@tailwindcss/oxide-linux-arm64-musl@4.1.18':
|
||||
resolution: {integrity: sha512-1px92582HkPQlaaCkdRcio71p8bc8i/ap5807tPRDK/uw953cauQBT8c5tVGkOwrHMfc2Yh6UuxaH4vtTjGvHg==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
|
||||
'@tailwindcss/oxide-linux-x64-gnu@4.1.18':
|
||||
resolution: {integrity: sha512-v3gyT0ivkfBLoZGF9LyHmts0Isc8jHZyVcbzio6Wpzifg/+5ZJpDiRiUhDLkcr7f/r38SWNe7ucxmGW3j3Kb/g==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
|
||||
'@tailwindcss/oxide-linux-x64-musl@4.1.18':
|
||||
resolution: {integrity: sha512-bhJ2y2OQNlcRwwgOAGMY0xTFStt4/wyU6pvI6LSuZpRgKQwxTec0/3Scu91O8ir7qCR3AuepQKLU/kX99FouqQ==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
|
||||
'@tailwindcss/oxide-wasm32-wasi@4.1.18':
|
||||
resolution: {integrity: sha512-LffYTvPjODiP6PT16oNeUQJzNVyJl1cjIebq/rWWBF+3eDst5JGEFSc5cWxyRCJ0Mxl+KyIkqRxk1XPEs9x8TA==}
|
||||
engines: {node: '>=14.0.0'}
|
||||
cpu: [wasm32]
|
||||
bundledDependencies:
|
||||
- '@napi-rs/wasm-runtime'
|
||||
- '@emnapi/core'
|
||||
- '@emnapi/runtime'
|
||||
- '@tybys/wasm-util'
|
||||
- '@emnapi/wasi-threads'
|
||||
- tslib
|
||||
|
||||
'@tailwindcss/oxide-win32-arm64-msvc@4.1.18':
|
||||
resolution: {integrity: sha512-HjSA7mr9HmC8fu6bdsZvZ+dhjyGCLdotjVOgLA2vEqxEBZaQo9YTX4kwgEvPCpRh8o4uWc4J/wEoFzhEmjvPbA==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [arm64]
|
||||
os: [win32]
|
||||
|
||||
'@tailwindcss/oxide-win32-x64-msvc@4.1.18':
|
||||
resolution: {integrity: sha512-bJWbyYpUlqamC8dpR7pfjA0I7vdF6t5VpUGMWRkXVE3AXgIZjYUYAK7II1GNaxR8J1SSrSrppRar8G++JekE3Q==}
|
||||
engines: {node: '>= 10'}
|
||||
cpu: [x64]
|
||||
os: [win32]
|
||||
|
||||
'@tailwindcss/oxide@4.1.18':
|
||||
resolution: {integrity: sha512-EgCR5tTS5bUSKQgzeMClT6iCY3ToqE1y+ZB0AKldj809QXk1Y+3jB0upOYZrn9aGIzPtUsP7sX4QQ4XtjBB95A==}
|
||||
engines: {node: '>= 10'}
|
||||
|
||||
'@tailwindcss/postcss@4.1.18':
|
||||
resolution: {integrity: sha512-Ce0GFnzAOuPyfV5SxjXGn0CubwGcuDB0zcdaPuCSzAa/2vII24JTkH+I6jcbXLb1ctjZMZZI6OjDaLPJQL1S0g==}
|
||||
|
||||
braces@3.0.3:
|
||||
resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==}
|
||||
engines: {node: '>=8'}
|
||||
|
||||
daisyui@5.5.14:
|
||||
resolution: {integrity: sha512-L47rvw7I7hK68TA97VB8Ee0woHew+/ohR6Lx6Ah/krfISOqcG4My7poNpX5Mo5/ytMxiR40fEaz6njzDi7cuSg==}
|
||||
|
||||
detect-libc@1.0.3:
|
||||
resolution: {integrity: sha512-pGjwhsmsp4kL2RTz08wcOlGN83otlqHeD/Z5T8GXZB+/YcpQ/dgo+lbU8ZsGxV0HIvqqxo9l7mqYwyYMD9bKDg==}
|
||||
engines: {node: '>=0.10'}
|
||||
hasBin: true
|
||||
|
||||
detect-libc@2.1.2:
|
||||
resolution: {integrity: sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==}
|
||||
engines: {node: '>=8'}
|
||||
|
||||
enhanced-resolve@5.18.4:
|
||||
resolution: {integrity: sha512-LgQMM4WXU3QI+SYgEc2liRgznaD5ojbmY3sb8LxyguVkIg5FxdpTkvk72te2R38/TGKxH634oLxXRGY6d7AP+Q==}
|
||||
engines: {node: '>=10.13.0'}
|
||||
|
||||
fill-range@7.1.1:
|
||||
resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==}
|
||||
engines: {node: '>=8'}
|
||||
|
||||
graceful-fs@4.2.11:
|
||||
resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==}
|
||||
|
||||
is-extglob@2.1.1:
|
||||
resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==}
|
||||
engines: {node: '>=0.10.0'}
|
||||
|
||||
is-glob@4.0.3:
|
||||
resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==}
|
||||
engines: {node: '>=0.10.0'}
|
||||
|
||||
is-number@7.0.0:
|
||||
resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==}
|
||||
engines: {node: '>=0.12.0'}
|
||||
|
||||
jiti@2.6.1:
|
||||
resolution: {integrity: sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==}
|
||||
hasBin: true
|
||||
|
||||
lightningcss-android-arm64@1.30.2:
|
||||
resolution: {integrity: sha512-BH9sEdOCahSgmkVhBLeU7Hc9DWeZ1Eb6wNS6Da8igvUwAe0sqROHddIlvU06q3WyXVEOYDZ6ykBZQnjTbmo4+A==}
|
||||
engines: {node: '>= 12.0.0'}
|
||||
cpu: [arm64]
|
||||
os: [android]
|
||||
|
||||
lightningcss-darwin-arm64@1.30.2:
|
||||
resolution: {integrity: sha512-ylTcDJBN3Hp21TdhRT5zBOIi73P6/W0qwvlFEk22fkdXchtNTOU4Qc37SkzV+EKYxLouZ6M4LG9NfZ1qkhhBWA==}
|
||||
engines: {node: '>= 12.0.0'}
|
||||
cpu: [arm64]
|
||||
os: [darwin]
|
||||
|
||||
lightningcss-darwin-x64@1.30.2:
|
||||
resolution: {integrity: sha512-oBZgKchomuDYxr7ilwLcyms6BCyLn0z8J0+ZZmfpjwg9fRVZIR5/GMXd7r9RH94iDhld3UmSjBM6nXWM2TfZTQ==}
|
||||
engines: {node: '>= 12.0.0'}
|
||||
cpu: [x64]
|
||||
os: [darwin]
|
||||
|
||||
lightningcss-freebsd-x64@1.30.2:
|
||||
resolution: {integrity: sha512-c2bH6xTrf4BDpK8MoGG4Bd6zAMZDAXS569UxCAGcA7IKbHNMlhGQ89eRmvpIUGfKWNVdbhSbkQaWhEoMGmGslA==}
|
||||
engines: {node: '>= 12.0.0'}
|
||||
cpu: [x64]
|
||||
os: [freebsd]
|
||||
|
||||
lightningcss-linux-arm-gnueabihf@1.30.2:
|
||||
resolution: {integrity: sha512-eVdpxh4wYcm0PofJIZVuYuLiqBIakQ9uFZmipf6LF/HRj5Bgm0eb3qL/mr1smyXIS1twwOxNWndd8z0E374hiA==}
|
||||
engines: {node: '>= 12.0.0'}
|
||||
cpu: [arm]
|
||||
os: [linux]
|
||||
|
||||
lightningcss-linux-arm64-gnu@1.30.2:
|
||||
resolution: {integrity: sha512-UK65WJAbwIJbiBFXpxrbTNArtfuznvxAJw4Q2ZGlU8kPeDIWEX1dg3rn2veBVUylA2Ezg89ktszWbaQnxD/e3A==}
|
||||
engines: {node: '>= 12.0.0'}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
|
||||
lightningcss-linux-arm64-musl@1.30.2:
|
||||
resolution: {integrity: sha512-5Vh9dGeblpTxWHpOx8iauV02popZDsCYMPIgiuw97OJ5uaDsL86cnqSFs5LZkG3ghHoX5isLgWzMs+eD1YzrnA==}
|
||||
engines: {node: '>= 12.0.0'}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
|
||||
lightningcss-linux-x64-gnu@1.30.2:
|
||||
resolution: {integrity: sha512-Cfd46gdmj1vQ+lR6VRTTadNHu6ALuw2pKR9lYq4FnhvgBc4zWY1EtZcAc6EffShbb1MFrIPfLDXD6Xprbnni4w==}
|
||||
engines: {node: '>= 12.0.0'}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
|
||||
lightningcss-linux-x64-musl@1.30.2:
|
||||
resolution: {integrity: sha512-XJaLUUFXb6/QG2lGIW6aIk6jKdtjtcffUT0NKvIqhSBY3hh9Ch+1LCeH80dR9q9LBjG3ewbDjnumefsLsP6aiA==}
|
||||
engines: {node: '>= 12.0.0'}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
|
||||
lightningcss-win32-arm64-msvc@1.30.2:
|
||||
resolution: {integrity: sha512-FZn+vaj7zLv//D/192WFFVA0RgHawIcHqLX9xuWiQt7P0PtdFEVaxgF9rjM/IRYHQXNnk61/H/gb2Ei+kUQ4xQ==}
|
||||
engines: {node: '>= 12.0.0'}
|
||||
cpu: [arm64]
|
||||
os: [win32]
|
||||
|
||||
lightningcss-win32-x64-msvc@1.30.2:
|
||||
resolution: {integrity: sha512-5g1yc73p+iAkid5phb4oVFMB45417DkRevRbt/El/gKXJk4jid+vPFF/AXbxn05Aky8PapwzZrdJShv5C0avjw==}
|
||||
engines: {node: '>= 12.0.0'}
|
||||
cpu: [x64]
|
||||
os: [win32]
|
||||
|
||||
lightningcss@1.30.2:
|
||||
resolution: {integrity: sha512-utfs7Pr5uJyyvDETitgsaqSyjCb2qNRAtuqUeWIAKztsOYdcACf2KtARYXg2pSvhkt+9NfoaNY7fxjl6nuMjIQ==}
|
||||
engines: {node: '>= 12.0.0'}
|
||||
|
||||
magic-string@0.30.21:
|
||||
resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==}
|
||||
|
||||
micromatch@4.0.8:
|
||||
resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==}
|
||||
engines: {node: '>=8.6'}
|
||||
|
||||
mri@1.2.0:
|
||||
resolution: {integrity: sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==}
|
||||
engines: {node: '>=4'}
|
||||
|
||||
nanoid@3.3.11:
|
||||
resolution: {integrity: sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==}
|
||||
engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1}
|
||||
hasBin: true
|
||||
|
||||
node-addon-api@7.1.1:
|
||||
resolution: {integrity: sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==}
|
||||
|
||||
picocolors@1.1.1:
|
||||
resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==}
|
||||
|
||||
picomatch@2.3.1:
|
||||
resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==}
|
||||
engines: {node: '>=8.6'}
|
||||
|
||||
postcss@8.5.6:
|
||||
resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==}
|
||||
engines: {node: ^10 || ^12 || >=14}
|
||||
|
||||
prettier-plugin-jinja-template@2.1.0:
|
||||
resolution: {integrity: sha512-mzoCp2Oy9BDSug80fw3B3J4n4KQj1hRvoQOL1akqcDKBb5nvYxrik9zUEDs4AEJ6nK7QDTGoH0y9rx7AlnQ78Q==}
|
||||
peerDependencies:
|
||||
prettier: ^3.0.0
|
||||
|
||||
prettier-plugin-tailwindcss@0.7.2:
|
||||
resolution: {integrity: sha512-LkphyK3Fw+q2HdMOoiEHWf93fNtYJwfamoKPl7UwtjFQdei/iIBoX11G6j706FzN3ymX9mPVi97qIY8328vdnA==}
|
||||
engines: {node: '>=20.19'}
|
||||
peerDependencies:
|
||||
'@ianvs/prettier-plugin-sort-imports': '*'
|
||||
'@prettier/plugin-hermes': '*'
|
||||
'@prettier/plugin-oxc': '*'
|
||||
'@prettier/plugin-pug': '*'
|
||||
'@shopify/prettier-plugin-liquid': '*'
|
||||
'@trivago/prettier-plugin-sort-imports': '*'
|
||||
'@zackad/prettier-plugin-twig': '*'
|
||||
prettier: ^3.0
|
||||
prettier-plugin-astro: '*'
|
||||
prettier-plugin-css-order: '*'
|
||||
prettier-plugin-jsdoc: '*'
|
||||
prettier-plugin-marko: '*'
|
||||
prettier-plugin-multiline-arrays: '*'
|
||||
prettier-plugin-organize-attributes: '*'
|
||||
prettier-plugin-organize-imports: '*'
|
||||
prettier-plugin-sort-imports: '*'
|
||||
prettier-plugin-svelte: '*'
|
||||
peerDependenciesMeta:
|
||||
'@ianvs/prettier-plugin-sort-imports':
|
||||
optional: true
|
||||
'@prettier/plugin-hermes':
|
||||
optional: true
|
||||
'@prettier/plugin-oxc':
|
||||
optional: true
|
||||
'@prettier/plugin-pug':
|
||||
optional: true
|
||||
'@shopify/prettier-plugin-liquid':
|
||||
optional: true
|
||||
'@trivago/prettier-plugin-sort-imports':
|
||||
optional: true
|
||||
'@zackad/prettier-plugin-twig':
|
||||
optional: true
|
||||
prettier-plugin-astro:
|
||||
optional: true
|
||||
prettier-plugin-css-order:
|
||||
optional: true
|
||||
prettier-plugin-jsdoc:
|
||||
optional: true
|
||||
prettier-plugin-marko:
|
||||
optional: true
|
||||
prettier-plugin-multiline-arrays:
|
||||
optional: true
|
||||
prettier-plugin-organize-attributes:
|
||||
optional: true
|
||||
prettier-plugin-organize-imports:
|
||||
optional: true
|
||||
prettier-plugin-sort-imports:
|
||||
optional: true
|
||||
prettier-plugin-svelte:
|
||||
optional: true
|
||||
|
||||
prettier@3.7.4:
|
||||
resolution: {integrity: sha512-v6UNi1+3hSlVvv8fSaoUbggEM5VErKmmpGA7Pl3HF8V6uKY7rvClBOJlH6yNwQtfTueNkGVpOv/mtWL9L4bgRA==}
|
||||
engines: {node: '>=14'}
|
||||
hasBin: true
|
||||
|
||||
source-map-js@1.2.1:
|
||||
resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==}
|
||||
engines: {node: '>=0.10.0'}
|
||||
|
||||
tailwindcss@4.1.18:
|
||||
resolution: {integrity: sha512-4+Z+0yiYyEtUVCScyfHCxOYP06L5Ne+JiHhY2IjR2KWMIWhJOYZKLSGZaP5HkZ8+bY0cxfzwDE5uOmzFXyIwxw==}
|
||||
|
||||
tapable@2.3.0:
|
||||
resolution: {integrity: sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==}
|
||||
engines: {node: '>=6'}
|
||||
|
||||
to-regex-range@5.0.1:
|
||||
resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==}
|
||||
engines: {node: '>=8.0'}
|
||||
|
||||
snapshots:
|
||||
|
||||
'@alloc/quick-lru@5.2.0': {}
|
||||
|
||||
'@jridgewell/gen-mapping@0.3.13':
|
||||
dependencies:
|
||||
'@jridgewell/sourcemap-codec': 1.5.5
|
||||
'@jridgewell/trace-mapping': 0.3.31
|
||||
|
||||
'@jridgewell/remapping@2.3.5':
|
||||
dependencies:
|
||||
'@jridgewell/gen-mapping': 0.3.13
|
||||
'@jridgewell/trace-mapping': 0.3.31
|
||||
|
||||
'@jridgewell/resolve-uri@3.1.2': {}
|
||||
|
||||
'@jridgewell/sourcemap-codec@1.5.5': {}
|
||||
|
||||
'@jridgewell/trace-mapping@0.3.31':
|
||||
dependencies:
|
||||
'@jridgewell/resolve-uri': 3.1.2
|
||||
'@jridgewell/sourcemap-codec': 1.5.5
|
||||
|
||||
'@parcel/watcher-android-arm64@2.5.1':
|
||||
optional: true
|
||||
|
||||
'@parcel/watcher-darwin-arm64@2.5.1':
|
||||
optional: true
|
||||
|
||||
'@parcel/watcher-darwin-x64@2.5.1':
|
||||
optional: true
|
||||
|
||||
'@parcel/watcher-freebsd-x64@2.5.1':
|
||||
optional: true
|
||||
|
||||
'@parcel/watcher-linux-arm-glibc@2.5.1':
|
||||
optional: true
|
||||
|
||||
'@parcel/watcher-linux-arm-musl@2.5.1':
|
||||
optional: true
|
||||
|
||||
'@parcel/watcher-linux-arm64-glibc@2.5.1':
|
||||
optional: true
|
||||
|
||||
'@parcel/watcher-linux-arm64-musl@2.5.1':
|
||||
optional: true
|
||||
|
||||
'@parcel/watcher-linux-x64-glibc@2.5.1':
|
||||
optional: true
|
||||
|
||||
'@parcel/watcher-linux-x64-musl@2.5.1':
|
||||
optional: true
|
||||
|
||||
'@parcel/watcher-win32-arm64@2.5.1':
|
||||
optional: true
|
||||
|
||||
'@parcel/watcher-win32-ia32@2.5.1':
|
||||
optional: true
|
||||
|
||||
'@parcel/watcher-win32-x64@2.5.1':
|
||||
optional: true
|
||||
|
||||
'@parcel/watcher@2.5.1':
|
||||
dependencies:
|
||||
detect-libc: 1.0.3
|
||||
is-glob: 4.0.3
|
||||
micromatch: 4.0.8
|
||||
node-addon-api: 7.1.1
|
||||
optionalDependencies:
|
||||
'@parcel/watcher-android-arm64': 2.5.1
|
||||
'@parcel/watcher-darwin-arm64': 2.5.1
|
||||
'@parcel/watcher-darwin-x64': 2.5.1
|
||||
'@parcel/watcher-freebsd-x64': 2.5.1
|
||||
'@parcel/watcher-linux-arm-glibc': 2.5.1
|
||||
'@parcel/watcher-linux-arm-musl': 2.5.1
|
||||
'@parcel/watcher-linux-arm64-glibc': 2.5.1
|
||||
'@parcel/watcher-linux-arm64-musl': 2.5.1
|
||||
'@parcel/watcher-linux-x64-glibc': 2.5.1
|
||||
'@parcel/watcher-linux-x64-musl': 2.5.1
|
||||
'@parcel/watcher-win32-arm64': 2.5.1
|
||||
'@parcel/watcher-win32-ia32': 2.5.1
|
||||
'@parcel/watcher-win32-x64': 2.5.1
|
||||
|
||||
'@tailwindcss/cli@4.1.18':
|
||||
dependencies:
|
||||
'@parcel/watcher': 2.5.1
|
||||
'@tailwindcss/node': 4.1.18
|
||||
'@tailwindcss/oxide': 4.1.18
|
||||
enhanced-resolve: 5.18.4
|
||||
mri: 1.2.0
|
||||
picocolors: 1.1.1
|
||||
tailwindcss: 4.1.18
|
||||
|
||||
'@tailwindcss/node@4.1.18':
|
||||
dependencies:
|
||||
'@jridgewell/remapping': 2.3.5
|
||||
enhanced-resolve: 5.18.4
|
||||
jiti: 2.6.1
|
||||
lightningcss: 1.30.2
|
||||
magic-string: 0.30.21
|
||||
source-map-js: 1.2.1
|
||||
tailwindcss: 4.1.18
|
||||
|
||||
'@tailwindcss/oxide-android-arm64@4.1.18':
|
||||
optional: true
|
||||
|
||||
'@tailwindcss/oxide-darwin-arm64@4.1.18':
|
||||
optional: true
|
||||
|
||||
'@tailwindcss/oxide-darwin-x64@4.1.18':
|
||||
optional: true
|
||||
|
||||
'@tailwindcss/oxide-freebsd-x64@4.1.18':
|
||||
optional: true
|
||||
|
||||
'@tailwindcss/oxide-linux-arm-gnueabihf@4.1.18':
|
||||
optional: true
|
||||
|
||||
'@tailwindcss/oxide-linux-arm64-gnu@4.1.18':
|
||||
optional: true
|
||||
|
||||
'@tailwindcss/oxide-linux-arm64-musl@4.1.18':
|
||||
optional: true
|
||||
|
||||
'@tailwindcss/oxide-linux-x64-gnu@4.1.18':
|
||||
optional: true
|
||||
|
||||
'@tailwindcss/oxide-linux-x64-musl@4.1.18':
|
||||
optional: true
|
||||
|
||||
'@tailwindcss/oxide-wasm32-wasi@4.1.18':
|
||||
optional: true
|
||||
|
||||
'@tailwindcss/oxide-win32-arm64-msvc@4.1.18':
|
||||
optional: true
|
||||
|
||||
'@tailwindcss/oxide-win32-x64-msvc@4.1.18':
|
||||
optional: true
|
||||
|
||||
'@tailwindcss/oxide@4.1.18':
|
||||
optionalDependencies:
|
||||
'@tailwindcss/oxide-android-arm64': 4.1.18
|
||||
'@tailwindcss/oxide-darwin-arm64': 4.1.18
|
||||
'@tailwindcss/oxide-darwin-x64': 4.1.18
|
||||
'@tailwindcss/oxide-freebsd-x64': 4.1.18
|
||||
'@tailwindcss/oxide-linux-arm-gnueabihf': 4.1.18
|
||||
'@tailwindcss/oxide-linux-arm64-gnu': 4.1.18
|
||||
'@tailwindcss/oxide-linux-arm64-musl': 4.1.18
|
||||
'@tailwindcss/oxide-linux-x64-gnu': 4.1.18
|
||||
'@tailwindcss/oxide-linux-x64-musl': 4.1.18
|
||||
'@tailwindcss/oxide-wasm32-wasi': 4.1.18
|
||||
'@tailwindcss/oxide-win32-arm64-msvc': 4.1.18
|
||||
'@tailwindcss/oxide-win32-x64-msvc': 4.1.18
|
||||
|
||||
'@tailwindcss/postcss@4.1.18':
|
||||
dependencies:
|
||||
'@alloc/quick-lru': 5.2.0
|
||||
'@tailwindcss/node': 4.1.18
|
||||
'@tailwindcss/oxide': 4.1.18
|
||||
postcss: 8.5.6
|
||||
tailwindcss: 4.1.18
|
||||
|
||||
braces@3.0.3:
|
||||
dependencies:
|
||||
fill-range: 7.1.1
|
||||
|
||||
daisyui@5.5.14: {}
|
||||
|
||||
detect-libc@1.0.3: {}
|
||||
|
||||
detect-libc@2.1.2: {}
|
||||
|
||||
enhanced-resolve@5.18.4:
|
||||
dependencies:
|
||||
graceful-fs: 4.2.11
|
||||
tapable: 2.3.0
|
||||
|
||||
fill-range@7.1.1:
|
||||
dependencies:
|
||||
to-regex-range: 5.0.1
|
||||
|
||||
graceful-fs@4.2.11: {}
|
||||
|
||||
is-extglob@2.1.1: {}
|
||||
|
||||
is-glob@4.0.3:
|
||||
dependencies:
|
||||
is-extglob: 2.1.1
|
||||
|
||||
is-number@7.0.0: {}
|
||||
|
||||
jiti@2.6.1: {}
|
||||
|
||||
lightningcss-android-arm64@1.30.2:
|
||||
optional: true
|
||||
|
||||
lightningcss-darwin-arm64@1.30.2:
|
||||
optional: true
|
||||
|
||||
lightningcss-darwin-x64@1.30.2:
|
||||
optional: true
|
||||
|
||||
lightningcss-freebsd-x64@1.30.2:
|
||||
optional: true
|
||||
|
||||
lightningcss-linux-arm-gnueabihf@1.30.2:
|
||||
optional: true
|
||||
|
||||
lightningcss-linux-arm64-gnu@1.30.2:
|
||||
optional: true
|
||||
|
||||
lightningcss-linux-arm64-musl@1.30.2:
|
||||
optional: true
|
||||
|
||||
lightningcss-linux-x64-gnu@1.30.2:
|
||||
optional: true
|
||||
|
||||
lightningcss-linux-x64-musl@1.30.2:
|
||||
optional: true
|
||||
|
||||
lightningcss-win32-arm64-msvc@1.30.2:
|
||||
optional: true
|
||||
|
||||
lightningcss-win32-x64-msvc@1.30.2:
|
||||
optional: true
|
||||
|
||||
lightningcss@1.30.2:
|
||||
dependencies:
|
||||
detect-libc: 2.1.2
|
||||
optionalDependencies:
|
||||
lightningcss-android-arm64: 1.30.2
|
||||
lightningcss-darwin-arm64: 1.30.2
|
||||
lightningcss-darwin-x64: 1.30.2
|
||||
lightningcss-freebsd-x64: 1.30.2
|
||||
lightningcss-linux-arm-gnueabihf: 1.30.2
|
||||
lightningcss-linux-arm64-gnu: 1.30.2
|
||||
lightningcss-linux-arm64-musl: 1.30.2
|
||||
lightningcss-linux-x64-gnu: 1.30.2
|
||||
lightningcss-linux-x64-musl: 1.30.2
|
||||
lightningcss-win32-arm64-msvc: 1.30.2
|
||||
lightningcss-win32-x64-msvc: 1.30.2
|
||||
|
||||
magic-string@0.30.21:
|
||||
dependencies:
|
||||
'@jridgewell/sourcemap-codec': 1.5.5
|
||||
|
||||
micromatch@4.0.8:
|
||||
dependencies:
|
||||
braces: 3.0.3
|
||||
picomatch: 2.3.1
|
||||
|
||||
mri@1.2.0: {}
|
||||
|
||||
nanoid@3.3.11: {}
|
||||
|
||||
node-addon-api@7.1.1: {}
|
||||
|
||||
picocolors@1.1.1: {}
|
||||
|
||||
picomatch@2.3.1: {}
|
||||
|
||||
postcss@8.5.6:
|
||||
dependencies:
|
||||
nanoid: 3.3.11
|
||||
picocolors: 1.1.1
|
||||
source-map-js: 1.2.1
|
||||
|
||||
prettier-plugin-jinja-template@2.1.0(prettier@3.7.4):
|
||||
dependencies:
|
||||
prettier: 3.7.4
|
||||
|
||||
prettier-plugin-tailwindcss@0.7.2(prettier@3.7.4):
|
||||
dependencies:
|
||||
prettier: 3.7.4
|
||||
|
||||
prettier@3.7.4: {}
|
||||
|
||||
source-map-js@1.2.1: {}
|
||||
|
||||
tailwindcss@4.1.18: {}
|
||||
|
||||
tapable@2.3.0: {}
|
||||
|
||||
to-regex-range@5.0.1:
|
||||
dependencies:
|
||||
is-number: 7.0.0
|
||||
|
||||
3
pnpm-workspace.yaml
Normal file
@ -0,0 +1,3 @@
|
||||
onlyBuiltDependencies:
|
||||
- '@parcel/watcher'
|
||||
- '@tailwindcss/oxide'
|
||||
@ -9,7 +9,8 @@ dependencies = [
|
||||
"django>=5.2.1",
|
||||
"django-extensions>=4.1",
|
||||
"django-model-utils>=5.0.0",
|
||||
"django-ninja>=1.4.1",
|
||||
"django-ninja>=1.4.5",
|
||||
"django-ninja-extra>=0.30.6",
|
||||
"django-oauth-toolkit>=3.0.1",
|
||||
"django-polymorphic>=4.1.0",
|
||||
"enviformer",
|
||||
@ -18,6 +19,7 @@ dependencies = [
|
||||
"envipy-plugins",
|
||||
"epam-indigo>=1.30.1",
|
||||
"gunicorn>=23.0.0",
|
||||
"jsonref>=1.1.0",
|
||||
"networkx>=3.4.2",
|
||||
"psycopg2-binary>=2.9.10",
|
||||
"python-dotenv>=1.1.0",
|
||||
@ -34,7 +36,7 @@ dependencies = [
|
||||
[tool.uv.sources]
|
||||
enviformer = { git = "ssh://git@git.envipath.com/enviPath/enviformer.git", rev = "v0.1.4" }
|
||||
envipy-plugins = { git = "ssh://git@git.envipath.com/enviPath/enviPy-plugins.git", rev = "v0.1.0" }
|
||||
envipy-additional-information = { git = "ssh://git@git.envipath.com/enviPath/enviPy-additional-information.git", rev = "v0.1.7"}
|
||||
envipy-additional-information = { git = "ssh://git@git.envipath.com/enviPath/enviPy-additional-information.git", rev = "v0.4.2" }
|
||||
envipy-ambit = { git = "ssh://git@git.envipath.com/enviPath/enviPy-ambit.git" }
|
||||
|
||||
[project.optional-dependencies]
|
||||
@ -45,6 +47,9 @@ dev = [
|
||||
"poethepoet>=0.37.0",
|
||||
"pre-commit>=4.3.0",
|
||||
"ruff>=0.13.3",
|
||||
"pytest-playwright>=0.7.1",
|
||||
"pytest-django>=4.11.1",
|
||||
"pytest-cov>=7.0.0",
|
||||
]
|
||||
|
||||
[tool.ruff]
|
||||
@ -66,28 +71,82 @@ docstring-code-format = true
|
||||
|
||||
[tool.poe.tasks]
|
||||
# Main tasks
|
||||
setup = { sequence = ["db-up", "migrate", "bootstrap"], help = "Complete setup: start database, run migrations, and bootstrap data" }
|
||||
dev = { cmd = "python manage.py runserver", help = "Start the development server", deps = ["db-up"] }
|
||||
setup = { sequence = [
|
||||
"db-up",
|
||||
"migrate",
|
||||
"bootstrap",
|
||||
], help = "Complete setup: start database, run migrations, and bootstrap data" }
|
||||
dev = { cmd = "uv run python scripts/dev_server.py", help = "Start the development server with CSS watcher", deps = [
|
||||
"db-up",
|
||||
"js-deps",
|
||||
] }
|
||||
build = { sequence = [
|
||||
"build-frontend",
|
||||
"collectstatic",
|
||||
], help = "Build frontend assets and collect static files" }
|
||||
|
||||
# Database tasks
|
||||
db-up = { cmd = "docker compose -f docker-compose.dev.yml up -d", help = "Start PostgreSQL database using Docker Compose" }
|
||||
db-down = { cmd = "docker compose -f docker-compose.dev.yml down", help = "Stop PostgreSQL database" }
|
||||
db-up = { cmd = "docker compose -p envipath -f docker-compose.dev.yml up -d", help = "Start PostgreSQL database using Docker Compose" }
|
||||
db-down = { cmd = "docker compose -p envipath -f docker-compose.dev.yml down", help = "Stop PostgreSQL database" }
|
||||
|
||||
# Celery tasks
|
||||
celery = { cmd = "celery -A envipath worker -l INFO -Q predict,model,background", help = "Start Celery worker for async task processing" }
|
||||
celery-dev = { sequence = [
|
||||
"db-up",
|
||||
"celery",
|
||||
], help = "Start database and Celery worker" }
|
||||
|
||||
# Frontend tasks
|
||||
js-deps = { cmd = "uv run python scripts/pnpm_wrapper.py install", help = "Install frontend dependencies" }
|
||||
|
||||
# Full cleanup tasks
|
||||
clean = { sequence = ["clean-db"], help = "Remove model files and database volumes (WARNING: destroys all data!)" }
|
||||
clean = { sequence = [
|
||||
"clean-db",
|
||||
], help = "Remove model files and database volumes (WARNING: destroys all data!)" }
|
||||
clean-db = { cmd = "docker compose -f docker-compose.dev.yml down -v", help = "Removes the database container and volume." }
|
||||
|
||||
# Django tasks
|
||||
migrate = { cmd = "python manage.py migrate", help = "Run database migrations" }
|
||||
migrate = { cmd = "uv run python manage.py migrate", help = "Run database migrations" }
|
||||
bootstrap = { shell = """
|
||||
echo "Bootstrapping initial data..."
|
||||
echo "This will take a bit ⏱️. Get yourself some coffee..."
|
||||
python manage.py bootstrap
|
||||
uv run python manage.py bootstrap
|
||||
echo "✓ Bootstrap complete"
|
||||
echo ""
|
||||
echo "Default admin credentials:"
|
||||
echo " Username: admin"
|
||||
echo " Email: admin@envipath.com"
|
||||
echo " Password: SuperSafe"
|
||||
""", help = "Bootstrap initial data (anonymous user, packages, models)" }
|
||||
shell = { cmd = "python manage.py shell", help = "Open Django shell" }
|
||||
""", help = "Bootstrap initial data (anonymous user, packages, models)" }
|
||||
shell = { cmd = "uv run python manage.py shell", help = "Open Django shell" }
|
||||
|
||||
|
||||
build-frontend = { cmd = "uv run python scripts/pnpm_wrapper.py run build", help = "Build frontend assets using pnpm", deps = [
|
||||
"js-deps",
|
||||
] } # Build tasks
|
||||
|
||||
|
||||
collectstatic = { cmd = "uv run python manage.py collectstatic --noinput", help = "Collect static files for production", deps = [
|
||||
"build-frontend",
|
||||
] }
|
||||
|
||||
frontend-test-setup = { cmd = "playwright install", help = "Install the browsers required for frontend testing" }
|
||||
|
||||
[tool.pytest.ini_options]
|
||||
addopts = "--verbose --capture=no --durations=10"
|
||||
testpaths = ["tests", "*/tests"]
|
||||
pythonpath = ["."]
|
||||
norecursedirs = [
|
||||
"env",
|
||||
"venv",
|
||||
"envipy-plugins",
|
||||
"envipy-additional-information",
|
||||
"envipy-ambit",
|
||||
"enviformer",
|
||||
]
|
||||
markers = [
|
||||
"api: API tests",
|
||||
"frontend: Frontend tests",
|
||||
"end2end: End-to-end tests",
|
||||
"slow: Slow tests",
|
||||
]
|
||||
|
||||
206
scripts/dev_server.py
Executable file
@ -0,0 +1,206 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Cross-platform development server script.
|
||||
Starts pnpm CSS watcher and Django dev server, handling cleanup on exit.
|
||||
Works on both Windows and Unix systems.
|
||||
"""
|
||||
|
||||
import atexit
|
||||
import shutil
|
||||
import signal
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
import dotenv
|
||||
|
||||
|
||||
def find_pnpm():
|
||||
"""
|
||||
Find pnpm executable on the system.
|
||||
Returns the path to pnpm or None if not found.
|
||||
"""
|
||||
# Try to find pnpm using shutil.which
|
||||
# On Windows, this will find pnpm.cmd if it's in PATH
|
||||
pnpm_path = shutil.which("pnpm")
|
||||
|
||||
if pnpm_path:
|
||||
return pnpm_path
|
||||
|
||||
# On Windows, also try pnpm.cmd explicitly
|
||||
if sys.platform == "win32":
|
||||
pnpm_cmd = shutil.which("pnpm.cmd")
|
||||
if pnpm_cmd:
|
||||
return pnpm_cmd
|
||||
|
||||
return None
|
||||
|
||||
|
||||
class DevServerManager:
|
||||
"""Manages background processes for development server."""
|
||||
|
||||
def __init__(self):
|
||||
self.processes = []
|
||||
self._cleanup_registered = False
|
||||
|
||||
def start_process(self, command, description, shell=False):
|
||||
"""Start a background process and return the process object."""
|
||||
print(f"Starting {description}...")
|
||||
try:
|
||||
if shell:
|
||||
# Use shell=True for commands that need shell interpretation
|
||||
process = subprocess.Popen(
|
||||
command,
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
text=True,
|
||||
bufsize=1,
|
||||
)
|
||||
else:
|
||||
# Split command into list for subprocess
|
||||
process = subprocess.Popen(
|
||||
command,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
text=True,
|
||||
bufsize=1,
|
||||
)
|
||||
self.processes.append((process, description))
|
||||
print(" ".join(command))
|
||||
print(f"✓ Started {description} (PID: {process.pid})")
|
||||
return process
|
||||
except Exception as e:
|
||||
print(f"✗ Failed to start {description}: {e}", file=sys.stderr)
|
||||
self.cleanup()
|
||||
sys.exit(1)
|
||||
|
||||
def cleanup(self):
|
||||
"""Terminate all running processes."""
|
||||
if not self.processes:
|
||||
return
|
||||
|
||||
print("\nShutting down...")
|
||||
for process, description in self.processes:
|
||||
if process.poll() is None: # Process is still running
|
||||
try:
|
||||
# Try graceful termination first
|
||||
if sys.platform == "win32":
|
||||
process.terminate()
|
||||
else:
|
||||
process.send_signal(signal.SIGTERM)
|
||||
|
||||
# Wait up to 5 seconds for graceful shutdown
|
||||
try:
|
||||
process.wait(timeout=5)
|
||||
except subprocess.TimeoutExpired:
|
||||
# Force kill if graceful shutdown failed
|
||||
if sys.platform == "win32":
|
||||
process.kill()
|
||||
else:
|
||||
process.send_signal(signal.SIGKILL)
|
||||
process.wait()
|
||||
|
||||
print(f"✓ {description} stopped")
|
||||
except Exception as e:
|
||||
print(f"✗ Error stopping {description}: {e}", file=sys.stderr)
|
||||
|
||||
self.processes.clear()
|
||||
|
||||
def register_cleanup(self):
|
||||
"""Register cleanup handlers for various exit scenarios."""
|
||||
if self._cleanup_registered:
|
||||
return
|
||||
|
||||
self._cleanup_registered = True
|
||||
|
||||
# Register atexit handler (works on all platforms)
|
||||
atexit.register(self.cleanup)
|
||||
|
||||
# Register signal handlers (Unix only)
|
||||
if sys.platform != "win32":
|
||||
signal.signal(signal.SIGINT, self._signal_handler)
|
||||
signal.signal(signal.SIGTERM, self._signal_handler)
|
||||
|
||||
def _signal_handler(self, signum, frame):
|
||||
"""Handle Unix signals."""
|
||||
self.cleanup()
|
||||
sys.exit(0)
|
||||
|
||||
def wait_for_process(self, process, description):
|
||||
"""Wait for a process to finish and handle its output."""
|
||||
try:
|
||||
# Stream output from the process
|
||||
for line in iter(process.stdout.readline, ""):
|
||||
if line:
|
||||
print(f"[{description}] {line.rstrip()}")
|
||||
|
||||
process.wait()
|
||||
return process.returncode
|
||||
except KeyboardInterrupt:
|
||||
# Handle Ctrl+C
|
||||
self.cleanup()
|
||||
sys.exit(0)
|
||||
except Exception as e:
|
||||
print(f"Error waiting for {description}: {e}", file=sys.stderr)
|
||||
self.cleanup()
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def main():
|
||||
"""Main entry point."""
|
||||
dotenv.load_dotenv()
|
||||
manager = DevServerManager()
|
||||
manager.register_cleanup()
|
||||
|
||||
# Find pnpm executable
|
||||
pnpm_path = find_pnpm()
|
||||
if not pnpm_path:
|
||||
print("Error: pnpm not found in PATH.", file=sys.stderr)
|
||||
print("\nPlease install pnpm:", file=sys.stderr)
|
||||
print(" Windows: https://pnpm.io/installation#on-windows", file=sys.stderr)
|
||||
print(" Unix: https://pnpm.io/installation#on-posix-systems", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
# Determine shell usage based on platform
|
||||
use_shell = sys.platform == "win32"
|
||||
|
||||
# Start pnpm CSS watcher
|
||||
# Use the found pnpm path to ensure it works on Windows
|
||||
pnpm_command = f'"{pnpm_path}" run dev' if use_shell else [pnpm_path, "run", "dev"]
|
||||
manager.start_process(
|
||||
pnpm_command,
|
||||
"CSS watcher",
|
||||
shell=use_shell,
|
||||
)
|
||||
|
||||
# Give pnpm a moment to start
|
||||
time.sleep(1)
|
||||
|
||||
# Start Django dev server
|
||||
port = os.environ.get("DJANGO_PORT", "8000")
|
||||
django_process = manager.start_process(
|
||||
["uv", "run", "python", "manage.py", "runserver", f"0:{port}"],
|
||||
f"Django server on port {port}",
|
||||
shell=False,
|
||||
)
|
||||
|
||||
print("\nDevelopment servers are running. Press Ctrl+C to stop.\n")
|
||||
|
||||
try:
|
||||
# Wait for Django server (main process)
|
||||
# If Django exits, we should clean up everything
|
||||
return_code = manager.wait_for_process(django_process, "Django")
|
||||
|
||||
# If Django exited unexpectedly, clean up and exit
|
||||
if return_code != 0:
|
||||
manager.cleanup()
|
||||
sys.exit(return_code)
|
||||
except KeyboardInterrupt:
|
||||
# Ctrl+C was pressed
|
||||
manager.cleanup()
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
59
scripts/pnpm_wrapper.py
Executable file
@ -0,0 +1,59 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Cross-platform pnpm command wrapper.
|
||||
Finds pnpm correctly on Windows (handles pnpm.cmd) and Unix systems.
|
||||
"""
|
||||
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
|
||||
def find_pnpm():
|
||||
"""
|
||||
Find pnpm executable on the system.
|
||||
Returns the path to pnpm or None if not found.
|
||||
"""
|
||||
# Try to find pnpm using shutil.which
|
||||
# On Windows, this will find pnpm.cmd if it's in PATH
|
||||
pnpm_path = shutil.which("pnpm")
|
||||
|
||||
if pnpm_path:
|
||||
return pnpm_path
|
||||
|
||||
# On Windows, also try pnpm.cmd explicitly
|
||||
if sys.platform == "win32":
|
||||
pnpm_cmd = shutil.which("pnpm.cmd")
|
||||
if pnpm_cmd:
|
||||
return pnpm_cmd
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def main():
|
||||
"""Main entry point - execute pnpm with provided arguments."""
|
||||
pnpm_path = find_pnpm()
|
||||
|
||||
if not pnpm_path:
|
||||
print("Error: pnpm not found in PATH.", file=sys.stderr)
|
||||
print("\nPlease install pnpm:", file=sys.stderr)
|
||||
print(" Windows: https://pnpm.io/installation#on-windows", file=sys.stderr)
|
||||
print(" Unix: https://pnpm.io/installation#on-posix-systems", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
# Get all arguments passed to this script
|
||||
args = sys.argv[1:]
|
||||
|
||||
# Execute pnpm with the provided arguments
|
||||
try:
|
||||
sys.exit(subprocess.call([pnpm_path] + args))
|
||||
except KeyboardInterrupt:
|
||||
# Handle Ctrl+C gracefully
|
||||
sys.exit(130)
|
||||
except Exception as e:
|
||||
print(f"Error executing pnpm: {e}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
84
static/css/daisyui-theme.css
Normal file
@ -0,0 +1,84 @@
|
||||
/**
|
||||
* DaisyUI Themes - Generated by Style Dictionary
|
||||
* Theme mappings defined in tokens/daisyui-themes.json
|
||||
*/
|
||||
|
||||
/* Light theme (default) */
|
||||
@plugin "daisyui/theme" {
|
||||
name: "envipath";
|
||||
default: true;
|
||||
color-scheme: light;
|
||||
|
||||
--color-base-100: var(--color-neutral-50);
|
||||
--color-base-200: var(--color-neutral-100);
|
||||
--color-base-300: var(--color-neutral-200);
|
||||
--color-base-content: var(--color-neutral-900);
|
||||
--color-primary: var(--color-primary-500);
|
||||
--color-primary-content: var(--color-primary-50);
|
||||
--color-secondary: var(--color-secondary-500);
|
||||
--color-secondary-content: var(--color-secondary-50);
|
||||
--color-accent: var(--color-accent-500);
|
||||
--color-accent-content: var(--color-accent-50);
|
||||
--color-neutral: var(--color-neutral-950);
|
||||
--color-neutral-content: var(--color-neutral-100);
|
||||
--color-info: var(--color-info-500);
|
||||
--color-info-content: var(--color-info-950);
|
||||
--color-success: var(--color-success-500);
|
||||
--color-success-content: var(--color-success-950);
|
||||
--color-warning: var(--color-warning-500);
|
||||
--color-warning-content: var(--color-warning-950);
|
||||
--color-error: var(--color-error-500);
|
||||
--color-error-content: var(--color-error-950);
|
||||
|
||||
/* border radius */
|
||||
--radius-selector: 1rem;
|
||||
--radius-field: 0.25rem;
|
||||
--radius-box: 0.5rem;
|
||||
|
||||
/* base sizes */
|
||||
--size-selector: 0.25rem;
|
||||
--size-field: 0.25rem;
|
||||
|
||||
/* border size */
|
||||
--border: 1px;
|
||||
|
||||
/* effects */
|
||||
--depth: 1;
|
||||
--noise: 0;
|
||||
}
|
||||
|
||||
/* Dark theme (prefers-color-scheme: dark) */
|
||||
@plugin "daisyui/theme" {
|
||||
name: "envipath-dark";
|
||||
prefersdark: true;
|
||||
color-scheme: dark;
|
||||
|
||||
--color-primary: var(--color-primary-400);
|
||||
--color-primary-content: var(--color-neutral-950);
|
||||
--color-secondary: var(--color-secondary-400);
|
||||
--color-secondary-content: var(--color-neutral-950);
|
||||
--color-accent: var(--color-primary-500);
|
||||
--color-accent-content: var(--color-neutral-950);
|
||||
--color-neutral: var(--color-neutral-300);
|
||||
--color-neutral-content: var(--color-neutral-900);
|
||||
--color-base-100: var(--color-neutral-900);
|
||||
--color-base-200: var(--color-neutral-800);
|
||||
--color-base-300: var(--color-neutral-700);
|
||||
--color-base-content: var(--color-neutral-50);
|
||||
--color-info: var(--color-primary-400);
|
||||
--color-info-content: var(--color-neutral-950);
|
||||
--color-success: var(--color-success-400);
|
||||
--color-success-content: var(--color-neutral-950);
|
||||
--color-warning: var(--color-warning-400);
|
||||
--color-warning-content: var(--color-neutral-950);
|
||||
--color-error: var(--color-error-400);
|
||||
--color-error-content: var(--color-neutral-950);
|
||||
--radius-selector: 1rem;
|
||||
--radius-field: 0.25rem;
|
||||
--radius-box: 0.5rem;
|
||||
--size-selector: 0.25rem;
|
||||
--size-field: 0.25rem;
|
||||
--border: 1px;
|
||||
--depth: 1;
|
||||
--noise: 0;
|
||||
}
|
||||
36
static/css/input.css
Normal file
@ -0,0 +1,36 @@
|
||||
@import "tailwindcss";
|
||||
|
||||
/* fira-code-latin-wght-normal */
|
||||
@font-face {
|
||||
font-family: 'Fira Code Variable';
|
||||
font-style: normal;
|
||||
font-display: swap;
|
||||
font-weight: 300 700;
|
||||
src: url(https://cdn.jsdelivr.net/fontsource/fonts/fira-code:vf@latest/latin-wght-normal.woff2) format('woff2-variations');
|
||||
unicode-range: U+0000-00FF,U+0131,U+0152-0153,U+02BB-02BC,U+02C6,U+02DA,U+02DC,U+0304,U+0308,U+0329,U+2000-206F,U+20AC,U+2122,U+2191,U+2193,U+2212,U+2215,U+FEFF,U+FFFD;
|
||||
}
|
||||
|
||||
/* inter-latin-wght-normal */
|
||||
@font-face {
|
||||
font-family: 'Inter Variable';
|
||||
font-style: normal;
|
||||
font-display: swap;
|
||||
font-weight: 100 900;
|
||||
src: url(https://cdn.jsdelivr.net/fontsource/fonts/inter:vf@latest/latin-wght-normal.woff2) format('woff2-variations');
|
||||
unicode-range: U+0000-00FF,U+0131,U+0152-0153,U+02BB-02BC,U+02C6,U+02DA,U+02DC,U+0304,U+0308,U+0329,U+2000-206F,U+20AC,U+2122,U+2191,U+2193,U+2212,U+2215,U+FEFF,U+FFFD;
|
||||
}
|
||||
|
||||
|
||||
/* Tell Tailwind where to find Django templates and Python files */
|
||||
@source "../../templates";
|
||||
|
||||
/* Custom theme configuration - must come before plugins */
|
||||
@import "./theme.css";
|
||||
|
||||
/* Import DaisyUI plugin */
|
||||
@plugin "daisyui" {
|
||||
logs: true;
|
||||
exclude: rootscrollgutter;
|
||||
}
|
||||
|
||||
@import "./daisyui-theme.css";
|
||||
111
static/css/theme.css
Normal file
@ -0,0 +1,111 @@
|
||||
/**
|
||||
* Tailwind v4 Theme - Generated by Style Dictionary
|
||||
* This creates Tailwind utility classes from design tokens
|
||||
*/
|
||||
|
||||
@theme {
|
||||
/* Colors */
|
||||
--color-primary-50: oklch(0.98 0.02 201);
|
||||
--color-primary-100: oklch(0.96 0.04 203);
|
||||
--color-primary-200: oklch(0.92 0.08 205);
|
||||
--color-primary-300: oklch(0.87 0.12 207);
|
||||
--color-primary-400: oklch(0.80 0.13 212);
|
||||
--color-primary-500: oklch(0.71 0.13 215);
|
||||
--color-primary-600: oklch(0.61 0.11 222);
|
||||
--color-primary-700: oklch(0.52 0.09 223);
|
||||
--color-primary-800: oklch(0.45 0.08 224);
|
||||
--color-primary-900: oklch(0.40 0.07 227);
|
||||
--color-primary-950: oklch(0.30 0.05 230);
|
||||
--color-secondary-50: oklch(0.98 0.02 166);
|
||||
--color-secondary-100: oklch(0.95 0.05 163);
|
||||
--color-secondary-200: oklch(0.90 0.09 164);
|
||||
--color-secondary-300: oklch(0.85 0.13 165);
|
||||
--color-secondary-400: oklch(0.77 0.15 163);
|
||||
--color-secondary-500: oklch(0.70 0.15 162);
|
||||
--color-secondary-600: oklch(0.60 0.13 163);
|
||||
--color-secondary-700: oklch(0.51 0.10 166);
|
||||
--color-secondary-800: oklch(0.43 0.09 167);
|
||||
--color-secondary-900: oklch(0.38 0.07 169);
|
||||
--color-secondary-950: oklch(0.26 0.05 173);
|
||||
--color-success-50: oklch(0.98 0.02 156);
|
||||
--color-success-100: oklch(0.96 0.04 157);
|
||||
--color-success-200: oklch(0.93 0.08 156);
|
||||
--color-success-300: oklch(0.87 0.14 154);
|
||||
--color-success-400: oklch(0.80 0.18 152);
|
||||
--color-success-500: oklch(0.72 0.19 150);
|
||||
--color-success-600: oklch(0.63 0.17 149);
|
||||
--color-success-700: oklch(0.53 0.14 150);
|
||||
--color-success-800: oklch(0.45 0.11 151);
|
||||
--color-success-900: oklch(0.39 0.09 153);
|
||||
--color-success-950: oklch(0.27 0.06 153);
|
||||
--color-warning-50: oklch(0.99 0.03 102);
|
||||
--color-warning-100: oklch(0.97 0.07 103);
|
||||
--color-warning-200: oklch(0.95 0.12 102);
|
||||
--color-warning-300: oklch(0.91 0.17 98);
|
||||
--color-warning-400: oklch(0.86 0.17 92);
|
||||
--color-warning-500: oklch(0.80 0.16 86);
|
||||
--color-warning-600: oklch(0.68 0.14 76);
|
||||
--color-warning-700: oklch(0.55 0.12 66);
|
||||
--color-warning-800: oklch(0.48 0.10 62);
|
||||
--color-warning-900: oklch(0.42 0.09 58);
|
||||
--color-warning-950: oklch(0.29 0.06 54);
|
||||
--color-error-50: oklch(0.97 0.01 17);
|
||||
--color-error-100: oklch(0.94 0.03 18);
|
||||
--color-error-200: oklch(0.88 0.06 18);
|
||||
--color-error-300: oklch(0.81 0.10 20);
|
||||
--color-error-400: oklch(0.71 0.17 22);
|
||||
--color-error-500: oklch(0.64 0.21 25);
|
||||
--color-error-600: oklch(0.58 0.22 27);
|
||||
--color-error-700: oklch(0.51 0.19 28);
|
||||
--color-error-800: oklch(0.44 0.16 27);
|
||||
--color-error-900: oklch(0.40 0.13 26);
|
||||
--color-error-950: oklch(0.26 0.09 26);
|
||||
--color-neutral-50: oklch(0.98 0.00 248);
|
||||
--color-neutral-100: oklch(0.97 0.01 248);
|
||||
--color-neutral-200: oklch(0.93 0.01 256);
|
||||
--color-neutral-300: oklch(0.87 0.02 253);
|
||||
--color-neutral-400: oklch(0.71 0.04 257);
|
||||
--color-neutral-500: oklch(0.55 0.04 257);
|
||||
--color-neutral-600: oklch(0.45 0.04 257);
|
||||
--color-neutral-700: oklch(0.37 0.04 257);
|
||||
--color-neutral-800: oklch(0.28 0.04 260);
|
||||
--color-neutral-900: oklch(0.28 0.04 260);
|
||||
--color-neutral-950: oklch(0.28 0.04 260);
|
||||
|
||||
/* Spacing */
|
||||
--spacing-0: 0;
|
||||
--spacing-1: 0.25rem;
|
||||
--spacing-2: 0.5rem;
|
||||
--spacing-3: 0.75rem;
|
||||
--spacing-4: 1rem;
|
||||
--spacing-5: 1.25rem;
|
||||
--spacing-6: 1.5rem;
|
||||
--spacing-7: 1.75rem;
|
||||
--spacing-8: 2rem;
|
||||
--spacing-10: 2.5rem;
|
||||
--spacing-12: 3rem;
|
||||
--spacing-16: 4rem;
|
||||
--spacing-20: 5rem;
|
||||
--spacing-24: 6rem;
|
||||
--spacing-32: 8rem;
|
||||
--spacing-40: 10rem;
|
||||
--spacing-48: 12rem;
|
||||
--spacing-56: 14rem;
|
||||
--spacing-64: 16rem;
|
||||
|
||||
/* Typography */
|
||||
--font-family-sans: 'Inter Variable', system-ui, -apple-system, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, sans-serif;
|
||||
--font-family-mono: 'Fira Code Variable', 'Cascadia Code', 'Source Code Pro', Menlo, Consolas, monospace;
|
||||
--font-family-base: 'Inter Variable', system-ui, -apple-system, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, sans-serif;
|
||||
--font-size-xs: 0.75rem;
|
||||
--font-size-sm: 0.875rem;
|
||||
--font-size-base: 1rem;
|
||||
--font-size-lg: 1.125rem;
|
||||
--font-size-xl: 1.25rem;
|
||||
--font-size-2xl: 1.5rem;
|
||||
--font-size-3xl: 1.875rem;
|
||||
--font-size-4xl: 2.25rem;
|
||||
--font-size-5xl: 3rem;
|
||||
--font-size-6xl: 3.75rem;
|
||||
--font-size-7xl: 4.5rem;
|
||||
}
|
||||
BIN
static/images/ep-rule-artwork.png
Normal file
|
After Width: | Height: | Size: 252 KiB |
BIN
static/images/hero.png
Normal file
|
After Width: | Height: | Size: 1.9 MiB |
BIN
static/images/linkedin.png
Normal file
|
After Width: | Height: | Size: 11 KiB |
3
static/images/logo-eawag.svg
Normal file
|
After Width: | Height: | Size: 21 KiB |
@ -1,225 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<!-- Created with Inkscape (http://www.inkscape.org/) -->
|
||||
|
||||
<svg
|
||||
xmlns:dc="http://purl.org/dc/elements/1.1/"
|
||||
xmlns:cc="http://creativecommons.org/ns#"
|
||||
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
version="1.1"
|
||||
width="314.98749"
|
||||
height="28.8125"
|
||||
id="svg3004"
|
||||
xml:space="preserve"><metadata
|
||||
id="metadata3010"><rdf:RDF><cc:Work
|
||||
rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
|
||||
rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
|
||||
id="defs3008" /><g
|
||||
transform="matrix(1.25,0,0,-1.25,0,28.8125)"
|
||||
id="g3012"><g
|
||||
transform="scale(0.1,0.1)"
|
||||
id="g3014"><path
|
||||
d="m 957.473,175.816 0,-4.296 -18.453,0 0,-48.614 -5.04,0 0,48.614 -18.378,0 0,4.296 41.871,0"
|
||||
id="path3016"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 969.695,175.816 0,-22.968 31.425,0 0,22.968 5.04,0 0,-52.91 -5.04,0 0,25.637 -31.425,0 0,-25.637 -5.039,0 0,52.91 5.039,0"
|
||||
id="path3018"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1055.58,175.816 0,-4.296 -31.49,0 0,-19.122 29.49,0 0,-4.293 -29.49,0 0,-20.898 31.87,0 0,-4.301 -36.91,0 0,52.91 36.53,0"
|
||||
id="path3020"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1124.58,175.816 0,-4.296 -31.5,0 0,-19.122 29.49,0 0,-4.293 -29.49,0 0,-20.898 31.87,0 0,-4.301 -36.91,0 0,52.91 36.54,0"
|
||||
id="path3022"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1139.76,175.816 30.83,-44.757 0.15,0 0,44.757 5.04,0 0,-52.91 -5.64,0 -30.82,44.754 -0.15,0 0,-44.754 -5.04,0 0,52.91 5.63,0"
|
||||
id="path3024"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1188.15,175.816 17.19,-47.355 0.15,0 17.06,47.355 5.34,0 -19.65,-52.91 -5.86,0 -19.56,52.91 5.33,0"
|
||||
id="path3026"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1235.15,122.906 5.043,0 0,52.9102 -5.043,0 0,-52.9102 z"
|
||||
id="path3028"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1277.3,150.691 c 1.54,0 2.99,0.243 4.38,0.711 1.39,0.469 2.59,1.145 3.63,2.036 1.04,0.886 1.86,1.968 2.48,3.222 0.63,1.258 0.92,2.703 0.92,4.336 0,3.262 -0.94,5.824 -2.81,7.703 -1.88,1.875 -4.74,2.821 -8.6,2.821 l -18.82,0 0,-20.829 18.82,0 z m 0.38,25.125 c 2.16,0 4.23,-0.269 6.19,-0.816 1.95,-0.543 3.65,-1.367 5.1,-2.48 1.46,-1.118 2.62,-2.54 3.49,-4.297 0.86,-1.758 1.29,-3.821 1.29,-6.192 0,-3.359 -0.86,-6.273 -2.6,-8.742 -1.72,-2.473 -4.29,-4.055 -7.69,-4.746 l 0,-0.145 c 1.73,-0.25 3.16,-0.703 4.29,-1.367 1.14,-0.668 2.06,-1.527 2.78,-2.558 0.72,-1.035 1.23,-2.239 1.56,-3.594 0.31,-1.363 0.53,-2.832 0.62,-4.41 0.05,-0.887 0.1,-1.977 0.16,-3.262 0.05,-1.285 0.15,-2.582 0.29,-3.891 0.15,-1.312 0.38,-2.543 0.71,-3.711 0.32,-1.156 0.74,-2.054 1.3,-2.699 l -5.56,0 c -0.29,0.496 -0.54,1.098 -0.7,1.809 -0.18,0.723 -0.31,1.461 -0.37,2.234 -0.08,0.762 -0.14,1.512 -0.2,2.254 -0.05,0.742 -0.1,1.387 -0.14,1.926 -0.09,1.875 -0.26,3.742 -0.49,5.598 -0.22,1.851 -0.69,3.503 -1.4,4.961 -0.72,1.46 -1.76,2.632 -3.11,3.523 -1.36,0.887 -3.23,1.281 -5.6,1.191 l -19.12,0 0,-23.496 -5.03,0 0,52.91 24.23,0"
|
||||
id="path3030"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1308.46,140.879 c 0.77,-2.793 1.95,-5.289 3.56,-7.484 1.61,-2.2 3.66,-3.965 6.19,-5.301 2.52,-1.336 5.54,-2.004 9.04,-2.004 3.51,0 6.51,0.668 9,2.004 2.5,1.336 4.55,3.101 6.15,5.301 1.6,2.195 2.8,4.691 3.56,7.484 0.77,2.789 1.15,5.613 1.15,8.48 0,2.914 -0.38,5.758 -1.15,8.52 -0.76,2.769 -1.96,5.25 -3.56,7.449 -1.6,2.199 -3.65,3.969 -6.15,5.297 -2.49,1.336 -5.49,2.008 -9,2.008 -3.5,0 -6.52,-0.672 -9.04,-2.008 -2.53,-1.328 -4.58,-3.098 -6.19,-5.297 -1.61,-2.199 -2.79,-4.68 -3.56,-7.449 -0.75,-2.762 -1.15,-5.606 -1.15,-8.52 0,-2.867 0.4,-5.691 1.15,-8.48 z m -4.63,18.934 c 1.04,3.308 2.6,6.23 4.67,8.777 2.08,2.547 4.68,4.57 7.82,6.078 3.14,1.504 6.79,2.254 10.93,2.254 4.15,0 7.78,-0.75 10.89,-2.254 3.11,-1.508 5.71,-3.531 7.78,-6.078 2.08,-2.547 3.64,-5.469 4.67,-8.777 1.05,-3.313 1.56,-6.797 1.56,-10.454 0,-3.656 -0.51,-7.136 -1.56,-10.449 -1.03,-3.308 -2.59,-6.226 -4.67,-8.738 -2.07,-2.52 -4.67,-4.531 -7.78,-6.043 -3.11,-1.5 -6.74,-2.266 -10.89,-2.266 -4.14,0 -7.79,0.766 -10.93,2.266 -3.14,1.512 -5.74,3.523 -7.82,6.043 -2.07,2.512 -3.63,5.43 -4.67,8.738 -1.04,3.313 -1.54,6.793 -1.54,10.449 0,3.657 0.5,7.141 1.54,10.454"
|
||||
id="path3032"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1367.77,175.816 30.84,-44.757 0.15,0 0,44.757 5.05,0 0,-52.91 -5.64,0 -30.83,44.754 -0.15,0 0,-44.754 -5.04,0 0,52.91 5.62,0"
|
||||
id="path3034"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1423.89,175.816 18.3,-46.386 18.22,46.386 7.41,0 0,-52.91 -5.03,0 0,45.723 -0.15,0 -18.09,-45.723 -4.74,0 -18.15,45.723 -0.15,0 0,-45.723 -5.04,0 0,52.91 7.42,0"
|
||||
id="path3036"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1517.1,175.816 0,-4.296 -31.48,0 0,-19.122 29.48,0 0,-4.293 -29.48,0 0,-20.898 31.86,0 0,-4.301 -36.9,0 0,52.91 36.52,0"
|
||||
id="path3038"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1532.29,175.816 30.82,-44.757 0.14,0 0,44.757 5.03,0 0,-52.91 -5.62,0 -30.81,44.754 -0.15,0 0,-44.754 -5.03,0 0,52.91 5.62,0"
|
||||
id="path3040"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1617.34,175.816 0,-4.296 -18.44,0 0,-48.614 -5.04,0 0,48.614 -18.38,0 0,4.296 41.86,0"
|
||||
id="path3042"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1647.8,143.656 -10.22,27.121 -10.6,-27.121 20.82,0 z m -7.18,32.16 20.74,-52.91 -5.4,0 -6.46,16.449 -24.07,0 -6.38,-16.449 -5.33,0 21.26,52.91 5.64,0"
|
||||
id="path3044"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1673.44,175.816 0,-48.609 29.65,0 0,-4.301 -34.68,0 0,52.91 5.03,0"
|
||||
id="path3046"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1769.74,165.254 c -1.02,1.605 -2.25,2.953 -3.71,4.043 -1.46,1.082 -3.06,1.914 -4.81,2.48 -1.76,0.567 -3.6,0.856 -5.52,0.856 -3.51,0 -6.53,-0.672 -9.05,-2.008 -2.52,-1.328 -4.58,-3.098 -6.18,-5.297 -1.61,-2.199 -2.8,-4.68 -3.56,-7.449 -0.77,-2.762 -1.15,-5.606 -1.15,-8.52 0,-2.867 0.38,-5.691 1.15,-8.48 0.76,-2.793 1.95,-5.289 3.56,-7.484 1.6,-2.2 3.66,-3.965 6.18,-5.301 2.52,-1.336 5.54,-2.004 9.05,-2.004 2.46,0 4.69,0.449 6.66,1.336 1.98,0.89 3.68,2.101 5.12,3.633 1.43,1.523 2.59,3.324 3.48,5.371 0.89,2.05 1.45,4.261 1.71,6.633 l 5.03,0 c -0.35,-3.258 -1.11,-6.204 -2.3,-8.821 -1.18,-2.617 -2.7,-4.84 -4.59,-6.664 -1.88,-1.824 -4.08,-3.238 -6.63,-4.223 -2.54,-0.992 -5.37,-1.492 -8.48,-1.492 -4.17,0 -7.81,0.766 -10.93,2.266 -3.15,1.512 -5.76,3.523 -7.83,6.043 -2.07,2.512 -3.62,5.43 -4.65,8.738 -1.04,3.313 -1.57,6.793 -1.57,10.449 0,3.657 0.53,7.141 1.57,10.454 1.03,3.308 2.58,6.23 4.65,8.777 2.07,2.547 4.68,4.57 7.83,6.078 3.12,1.504 6.76,2.254 10.93,2.254 2.52,0 4.97,-0.371 7.38,-1.106 2.39,-0.738 4.56,-1.843 6.51,-3.296 1.95,-1.461 3.58,-3.254 4.89,-5.372 1.3,-2.125 2.13,-4.574 2.47,-7.335 l -5.04,0 c -0.43,2.019 -1.16,3.839 -2.17,5.441"
|
||||
id="path3048"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1791.01,140.879 c 0.76,-2.793 1.95,-5.289 3.56,-7.484 1.6,-2.2 3.66,-3.965 6.18,-5.301 2.52,-1.336 5.53,-2.004 9.04,-2.004 3.51,0 6.5,0.668 9.01,2.004 2.49,1.336 4.54,3.101 6.14,5.301 1.61,2.195 2.79,4.691 3.57,7.484 0.75,2.789 1.14,5.613 1.14,8.48 0,2.914 -0.39,5.758 -1.14,8.52 -0.78,2.769 -1.96,5.25 -3.57,7.449 -1.6,2.199 -3.65,3.969 -6.14,5.297 -2.51,1.336 -5.5,2.008 -9.01,2.008 -3.51,0 -6.52,-0.672 -9.04,-2.008 -2.52,-1.328 -4.58,-3.098 -6.18,-5.297 -1.61,-2.199 -2.8,-4.68 -3.56,-7.449 -0.78,-2.762 -1.16,-5.606 -1.16,-8.52 0,-2.867 0.38,-5.691 1.16,-8.48 z m -4.64,18.934 c 1.04,3.308 2.6,6.23 4.67,8.777 2.08,2.547 4.68,4.57 7.82,6.078 3.13,1.504 6.77,2.254 10.93,2.254 4.15,0 7.78,-0.75 10.89,-2.254 3.11,-1.508 5.72,-3.531 7.79,-6.078 2.07,-2.547 3.62,-5.469 4.66,-8.777 1.04,-3.313 1.56,-6.797 1.56,-10.454 0,-3.656 -0.52,-7.136 -1.56,-10.449 -1.04,-3.308 -2.59,-6.226 -4.66,-8.738 -2.07,-2.52 -4.68,-4.531 -7.79,-6.043 -3.11,-1.5 -6.74,-2.266 -10.89,-2.266 -4.16,0 -7.8,0.766 -10.93,2.266 -3.14,1.512 -5.74,3.523 -7.82,6.043 -2.07,2.512 -3.63,5.43 -4.67,8.738 -1.04,3.313 -1.56,6.793 -1.56,10.449 0,3.657 0.52,7.141 1.56,10.454"
|
||||
id="path3050"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1850.33,175.816 30.82,-44.757 0.15,0 0,44.757 5.04,0 0,-52.91 -5.64,0 -30.82,44.754 -0.15,0 0,-44.754 -5.04,0 0,52.91 5.64,0"
|
||||
id="path3052"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1935.39,175.816 0,-4.296 -18.45,0 0,-48.614 -5.04,0 0,48.614 -18.37,0 0,4.296 41.86,0"
|
||||
id="path3054"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1965.86,143.656 -10.23,27.121 -10.6,-27.121 20.83,0 z m -7.21,32.16 20.76,-52.91 -5.41,0 -6.44,16.449 -24.08,0 -6.38,-16.449 -5.33,0 21.26,52.91 5.62,0"
|
||||
id="path3056"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1993.71,175.816 18.3,-46.386 18.24,46.386 7.41,0 0,-52.91 -5.04,0 0,45.723 -0.15,0 -18.09,-45.723 -4.73,0 -18.16,45.723 -0.14,0 0,-45.723 -5.05,0 0,52.91 7.41,0"
|
||||
id="path3058"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 2050.78,122.906 5.0273,0 0,52.9102 -5.0273,0 0,-52.9102 z"
|
||||
id="path3060"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 2074.63,175.816 30.83,-44.757 0.15,0 0,44.757 5.04,0 0,-52.91 -5.63,0 -30.83,44.754 -0.15,0 0,-44.754 -5.04,0 0,52.91 5.63,0"
|
||||
id="path3062"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 2151.78,143.656 -10.24,27.121 -10.58,-27.121 20.82,0 z m -7.2,32.16 20.76,-52.91 -5.41,0 -6.45,16.449 -24.09,0 -6.36,-16.449 -5.34,0 21.27,52.91 5.62,0"
|
||||
id="path3064"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 2177.93,175.816 30.82,-44.757 0.16,0 0,44.757 5.05,0 0,-52.91 -5.64,0 -30.84,44.754 -0.14,0 0,-44.754 -5.04,0 0,52.91 5.63,0"
|
||||
id="path3066"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 2263.01,175.816 0,-4.296 -18.46,0 0,-48.614 -5.04,0 0,48.614 -18.38,0 0,4.296 41.88,0"
|
||||
id="path3068"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 945.785,32.6602 c 1.875,0 3.656,0.1562 5.336,0.4804 1.676,0.3164 3.16,0.8985 4.445,1.7383 1.286,0.8477 2.301,1.9649 3.043,3.375 0.739,1.4102 1.11,3.1719 1.11,5.2969 0,3.4101 -1.203,5.9648 -3.602,7.668 -2.387,1.7031 -5.836,2.5585 -10.332,2.5585 l -17.344,0 0,-21.1171 17.344,0 z m 0,25.414 c 2.028,0 3.778,0.2344 5.262,0.711 1.48,0.4609 2.719,1.1015 3.711,1.9218 0.98,0.8125 1.726,1.7696 2.215,2.8555 0.5,1.082 0.742,2.2422 0.742,3.4766 0,6.6211 -3.977,9.9375 -11.93,9.9375 l -17.344,0 0,-18.9024 17.344,0 z m 0,23.1953 c 2.223,0 4.36,-0.2109 6.41,-0.6289 2.051,-0.4218 3.852,-1.1328 5.41,-2.1484 1.559,-1.0156 2.805,-2.3438 3.743,-4 0.937,-1.6563 1.406,-3.7227 1.406,-6.1914 0,-1.3867 -0.219,-2.7305 -0.668,-4.0391 -0.445,-1.3086 -1.074,-2.4961 -1.887,-3.5547 -0.808,-1.0625 -1.781,-1.9687 -2.89,-2.7031 -1.114,-0.7422 -2.36,-1.2617 -3.739,-1.5586 l 0,-0.1523 c 3.403,-0.4453 6.121,-1.8321 8.145,-4.1797 2.031,-2.3477 3.043,-5.25 3.043,-8.711 0,-0.8398 -0.074,-1.7851 -0.227,-2.8515 -0.144,-1.0625 -0.437,-2.1524 -0.886,-3.2617 -0.446,-1.1133 -1.086,-2.2149 -1.93,-3.2969 -0.836,-1.0859 -1.957,-2.0391 -3.363,-2.8516 -1.414,-0.8203 -3.141,-1.4883 -5.192,-2.0039 -2.051,-0.5195 -4.508,-0.7812 -7.375,-0.7812 l -22.379,0 0,52.914 22.379,0"
|
||||
id="path3070"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 975.426,28.3555 5.04297,0 0,52.9141 -5.04297,0 0,-52.9141 z"
|
||||
id="path3072"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 997.105,46.3281 c 0.762,-2.789 1.95,-5.2812 3.555,-7.4804 1.6,-2.1993 3.67,-3.9688 6.18,-5.2969 2.53,-1.3399 5.54,-2.0039 9.04,-2.0039 3.51,0 6.52,0.664 9.01,2.0039 2.5,1.3281 4.54,3.0976 6.15,5.2969 1.61,2.1992 2.79,4.6914 3.55,7.4804 0.77,2.793 1.16,5.6211 1.16,8.4844 0,2.918 -0.39,5.7578 -1.16,8.5273 -0.76,2.7618 -1.94,5.2461 -3.55,7.4454 -1.61,2.2031 -3.65,3.9648 -6.15,5.3007 -2.49,1.3321 -5.5,2 -9.01,2 -3.5,0 -6.51,-0.6679 -9.04,-2 -2.51,-1.3359 -4.58,-3.0976 -6.18,-5.3007 -1.605,-2.1993 -2.793,-4.6836 -3.555,-7.4454 -0.773,-2.7695 -1.152,-5.6093 -1.152,-8.5273 0,-2.8633 0.379,-5.6914 1.152,-8.4844 z m -4.632,18.9336 c 1.035,3.3125 2.59,6.2383 4.668,8.7813 2.074,2.5429 4.679,4.5742 7.819,6.0781 3.13,1.5078 6.77,2.2578 10.92,2.2578 4.15,0 7.79,-0.75 10.9,-2.2578 3.11,-1.5039 5.71,-3.5352 7.78,-6.0781 2.08,-2.543 3.63,-5.4688 4.67,-8.7813 1.04,-3.3047 1.56,-6.789 1.56,-10.4492 0,-3.6602 -0.52,-7.1406 -1.56,-10.4414 -1.04,-3.3164 -2.59,-6.2305 -4.67,-8.7461 -2.07,-2.5195 -4.67,-4.5312 -7.78,-6.0391 -3.11,-1.5039 -6.75,-2.2656 -10.9,-2.2656 -4.15,0 -7.79,0.7617 -10.92,2.2656 -3.14,1.5079 -5.745,3.5196 -7.819,6.0391 -2.078,2.5156 -3.633,5.4297 -4.668,8.7461 -1.035,3.3008 -1.559,6.7812 -1.559,10.4414 0,3.6602 0.524,7.1445 1.559,10.4492"
|
||||
id="path3074"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1087.1,81.2695 0,-4.2929 -18.45,0 0,-48.6211 -5.04,0 0,48.6211 -18.38,0 0,4.2929 41.87,0"
|
||||
id="path3076"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1118.15,56.1484 c 1.53,0 2.99,0.2383 4.37,0.7071 1.39,0.4687 2.6,1.1484 3.63,2.0351 1.04,0.8907 1.86,1.9688 2.49,3.2227 0.61,1.2617 0.93,2.7109 0.93,4.332 0,3.2656 -0.95,5.836 -2.82,7.7031 -1.88,1.8829 -4.75,2.8282 -8.6,2.8282 l -18.82,0 0,-20.8282 18.82,0 z m 0.37,25.1211 c 2.17,0 4.23,-0.2695 6.19,-0.8203 1.95,-0.539 3.65,-1.3633 5.11,-2.4765 1.46,-1.1133 2.62,-2.543 3.49,-4.3008 0.86,-1.7578 1.29,-3.8125 1.29,-6.1875 0,-3.3594 -0.86,-6.2696 -2.59,-8.7461 -1.73,-2.4688 -4.3,-4.0469 -7.71,-4.7383 l 0,-0.1484 c 1.73,-0.25 3.16,-0.7032 4.3,-1.3672 1.14,-0.668 2.06,-1.5235 2.78,-2.5586 0.71,-1.0391 1.23,-2.2344 1.55,-3.5977 0.33,-1.3593 0.54,-2.8281 0.63,-4.4062 0.05,-0.8867 0.1,-1.9766 0.15,-3.2656 0.05,-1.2852 0.15,-2.5782 0.3,-3.8868 0.15,-1.3125 0.38,-2.5468 0.71,-3.707 0.31,-1.1562 0.74,-2.0586 1.29,-2.707 l -5.56,0 c -0.29,0.5 -0.53,1.1015 -0.7,1.8203 -0.17,0.7187 -0.3,1.4531 -0.37,2.2265 -0.07,0.7618 -0.14,1.5118 -0.19,2.25 -0.04,0.7461 -0.1,1.3946 -0.15,1.9297 -0.1,1.875 -0.26,3.7461 -0.48,5.6016 -0.22,1.8555 -0.69,3.5 -1.41,4.9609 -0.71,1.4532 -1.75,2.6289 -3.11,3.5235 -1.36,0.8906 -3.22,1.2812 -5.59,1.1875 l -19.12,0 0,-23.5 -5.04,0 0,52.914 24.23,0"
|
||||
id="path3078"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1174.25,49.1055 -10.23,27.125 -10.6,-27.125 20.83,0 z m -7.19,32.164 20.75,-52.914 -5.41,0 -6.45,16.457 -24.08,0 -6.38,-16.457 -5.33,0 21.27,52.914 5.63,0"
|
||||
id="path3080"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1200.41,81.2695 30.83,-44.7617 0.15,0 0,44.7617 5.04,0 0,-52.914 -5.63,0 -30.84,44.7578 -0.15,0 0,-44.7578 -5.04,0 0,52.914 5.64,0"
|
||||
id="path3082"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1252.87,38.9609 c 0.9,-1.8281 2.12,-3.289 3.67,-4.375 1.57,-1.0898 3.4,-1.8671 5.52,-2.3359 2.12,-0.4727 4.4,-0.7031 6.83,-0.7031 1.37,0 2.89,0.1914 4.52,0.5976 1.62,0.3907 3.14,1.0196 4.55,1.8828 1.42,0.8711 2.58,1.9727 3.52,3.336 0.94,1.3515 1.41,3.0039 1.41,4.9258 0,1.4843 -0.33,2.7695 -1.01,3.8593 -0.66,1.086 -1.53,1.9961 -2.58,2.7383 -1.07,0.7422 -2.24,1.3438 -3.53,1.8164 -1.28,0.4688 -2.55,0.8555 -3.78,1.1524 l -11.78,2.8789 c -1.54,0.4062 -3.02,0.8906 -4.49,1.4922 -1.44,0.5898 -2.72,1.375 -3.81,2.3672 -1.09,0.9843 -1.95,2.2031 -2.63,3.6289 -0.67,1.4336 -1,3.1875 -1,5.2617 0,1.289 0.25,2.7929 0.74,4.5234 0.49,1.7266 1.42,3.3594 2.79,4.8906 1.35,1.5274 3.21,2.8243 5.59,3.8907 2.37,1.0625 5.4,1.5898 9.1,1.5898 2.62,0 5.13,-0.3398 7.49,-1.0351 2.38,-0.6915 4.47,-1.7305 6.22,-3.1133 1.8,-1.3789 3.21,-3.0977 4.26,-5.1446 1.08,-2.0546 1.6,-4.4453 1.6,-7.1601 l -5.03,0 c -0.1,2.0351 -0.56,3.7969 -1.37,5.3047 -0.82,1.5039 -1.88,2.7617 -3.19,3.7773 -1.3,1.0117 -2.81,1.7852 -4.53,2.3008 -1.7,0.5195 -3.49,0.7773 -5.37,0.7773 -1.72,0 -3.39,-0.1914 -5,-0.5586 -1.61,-0.371 -3.01,-0.9609 -4.22,-1.7812 -1.21,-0.8164 -2.18,-1.8906 -2.93,-3.2227 -0.74,-1.332 -1.11,-2.9882 -1.11,-4.9648 0,-1.2305 0.22,-2.3086 0.64,-3.2227 0.42,-0.914 0.98,-1.6953 1.72,-2.332 0.75,-0.6484 1.61,-1.1601 2.56,-1.5547 0.98,-0.4023 1.99,-0.7226 3.08,-0.9687 l 12.9,-3.1875 c 1.88,-0.4883 3.64,-1.0938 5.3,-1.8164 1.65,-0.711 3.11,-1.6055 4.37,-2.6602 1.27,-1.0625 2.24,-2.3633 2.97,-3.8945 0.71,-1.5313 1.07,-3.3867 1.07,-5.5586 0,-0.5899 -0.07,-1.3828 -0.2,-2.3672 -0.11,-0.9922 -0.41,-2.0313 -0.87,-3.1523 -0.47,-1.1133 -1.14,-2.2344 -2,-3.3711 -0.87,-1.1368 -2.06,-2.1602 -3.56,-3.0782 -1.51,-0.9062 -3.37,-1.6562 -5.6,-2.2226 -2.22,-0.5586 -4.88,-0.8516 -8,-0.8516 -3.11,0 -6,0.3594 -8.68,1.0781 -2.65,0.7188 -4.94,1.8125 -6.81,3.2969 -1.88,1.4844 -3.32,3.3789 -4.34,5.707 -1,2.3204 -1.43,5.1055 -1.3,8.375 l 5.05,0 c -0.06,-2.7148 0.37,-4.9921 1.25,-6.8164"
|
||||
id="path3084"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1331.72,81.2695 0,-4.2929 -28.53,0 0,-19.1211 25.35,0 0,-4.3008 -25.35,0 0,-25.1992 -5.04,0 0,52.914 33.57,0"
|
||||
id="path3086"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1343.54,46.3281 c 0.78,-2.789 1.95,-5.2812 3.55,-7.4804 1.6,-2.1993 3.68,-3.9688 6.2,-5.2969 2.51,-1.3399 5.53,-2.0039 9.03,-2.0039 3.51,0 6.51,0.664 9.01,2.0039 2.5,1.3281 4.55,3.0976 6.15,5.2969 1.6,2.1992 2.78,4.6914 3.56,7.4804 0.76,2.793 1.15,5.6211 1.15,8.4844 0,2.918 -0.39,5.7578 -1.15,8.5273 -0.78,2.7618 -1.96,5.2461 -3.56,7.4454 -1.6,2.2031 -3.65,3.9648 -6.15,5.3007 -2.5,1.3321 -5.5,2 -9.01,2 -3.5,0 -6.52,-0.6679 -9.03,-2 -2.52,-1.3359 -4.6,-3.0976 -6.2,-5.3007 -1.6,-2.1993 -2.77,-4.6836 -3.55,-7.4454 -0.77,-2.7695 -1.14,-5.6093 -1.14,-8.5273 0,-2.8633 0.37,-5.6914 1.14,-8.4844 z m -4.63,18.9336 c 1.03,3.3125 2.59,6.2383 4.66,8.7813 2.09,2.5429 4.69,4.5742 7.82,6.0781 3.14,1.5078 6.79,2.2578 10.93,2.2578 4.15,0 7.8,-0.75 10.9,-2.2578 3.11,-1.5039 5.7,-3.5352 7.78,-6.0781 2.09,-2.543 3.63,-5.4688 4.66,-8.7813 1.04,-3.3047 1.57,-6.789 1.57,-10.4492 0,-3.6602 -0.53,-7.1406 -1.57,-10.4414 -1.03,-3.3164 -2.57,-6.2305 -4.66,-8.7461 -2.08,-2.5195 -4.67,-4.5312 -7.78,-6.0391 -3.1,-1.5039 -6.75,-2.2656 -10.9,-2.2656 -4.14,0 -7.79,0.7617 -10.93,2.2656 -3.13,1.5079 -5.73,3.5196 -7.82,6.0391 -2.07,2.5156 -3.63,5.4297 -4.66,8.7461 -1.04,3.3008 -1.56,6.7812 -1.56,10.4414 0,3.6602 0.52,7.1445 1.56,10.4492"
|
||||
id="path3088"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1421.16,56.1484 c 1.54,0 3,0.2383 4.39,0.7071 1.37,0.4687 2.58,1.1484 3.62,2.0351 1.04,0.8907 1.87,1.9688 2.49,3.2227 0.61,1.2617 0.91,2.7109 0.91,4.332 0,3.2656 -0.93,5.836 -2.81,7.7031 -1.88,1.8829 -4.73,2.8282 -8.6,2.8282 l -18.83,0 0,-20.8282 18.83,0 z m 0.37,25.1211 c 2.18,0 4.24,-0.2695 6.18,-0.8203 1.96,-0.539 3.67,-1.3633 5.12,-2.4765 1.47,-1.1133 2.63,-2.543 3.49,-4.3008 0.86,-1.7578 1.3,-3.8125 1.3,-6.1875 0,-3.3594 -0.87,-6.2696 -2.6,-8.7461 -1.72,-2.4688 -4.3,-4.0469 -7.71,-4.7383 l 0,-0.1484 c 1.74,-0.25 3.17,-0.7032 4.3,-1.3672 1.13,-0.668 2.06,-1.5235 2.78,-2.5586 0.72,-1.0391 1.24,-2.2344 1.56,-3.5977 0.32,-1.3593 0.52,-2.8281 0.63,-4.4062 0.05,-0.8867 0.1,-1.9766 0.15,-3.2656 0.05,-1.2852 0.15,-2.5782 0.29,-3.8868 0.16,-1.3125 0.38,-2.5468 0.7,-3.707 0.33,-1.1562 0.76,-2.0586 1.3,-2.707 l -5.55,0 c -0.31,0.5 -0.54,1.1015 -0.71,1.8203 -0.17,0.7187 -0.29,1.4531 -0.37,2.2265 -0.08,0.7618 -0.13,1.5118 -0.18,2.25 -0.05,0.7461 -0.1,1.3946 -0.15,1.9297 -0.1,1.875 -0.25,3.7461 -0.48,5.6016 -0.22,1.8555 -0.7,3.5 -1.41,4.9609 -0.73,1.4532 -1.76,2.6289 -3.12,3.5235 -1.36,0.8906 -3.21,1.2812 -5.59,1.1875 l -19.13,0 0,-23.5 -5.03,0 0,52.914 24.23,0"
|
||||
id="path3090"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1456.21,81.2695 18.3,-46.3906 18.24,46.3906 7.41,0 0,-52.914 -5.04,0 0,45.7265 -0.15,0 -18.08,-45.7265 -4.74,0 -18.17,45.7265 -0.13,0 0,-45.7265 -5.05,0 0,52.914 7.41,0"
|
||||
id="path3092"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1541.21,49.1055 -10.22,27.125 -10.6,-27.125 20.82,0 z m -7.19,32.164 20.74,-52.914 -5.42,0 -6.42,16.457 -24.08,0 -6.38,-16.457 -5.33,0 21.27,52.914 5.62,0"
|
||||
id="path3094"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1593,81.2695 0,-4.2929 -18.47,0 0,-48.6211 -5.04,0 0,48.6211 -18.37,0 0,4.2929 41.88,0"
|
||||
id="path3096"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1600.55,28.3555 5.0391,0 0,52.9141 -5.0391,0 0,-52.9141 z"
|
||||
id="path3098"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1622.23,46.3281 c 0.76,-2.789 1.94,-5.2812 3.55,-7.4804 1.6,-2.1993 3.67,-3.9688 6.19,-5.2969 2.52,-1.3399 5.53,-2.0039 9.04,-2.0039 3.5,0 6.5,0.664 9.01,2.0039 2.48,1.3281 4.54,3.0976 6.14,5.2969 1.61,2.1992 2.8,4.6914 3.56,7.4804 0.76,2.793 1.15,5.6211 1.15,8.4844 0,2.918 -0.39,5.7578 -1.15,8.5273 -0.76,2.7618 -1.95,5.2461 -3.56,7.4454 -1.6,2.2031 -3.66,3.9648 -6.14,5.3007 -2.51,1.3321 -5.51,2 -9.01,2 -3.51,0 -6.52,-0.6679 -9.04,-2 -2.52,-1.3359 -4.59,-3.0976 -6.19,-5.3007 -1.61,-2.1993 -2.79,-4.6836 -3.55,-7.4454 -0.77,-2.7695 -1.16,-5.6093 -1.16,-8.5273 0,-2.8633 0.39,-5.6914 1.16,-8.4844 z m -4.64,18.9336 c 1.03,3.3125 2.6,6.2383 4.68,8.7813 2.07,2.5429 4.67,4.5742 7.8,6.0781 3.14,1.5078 6.79,2.2578 10.94,2.2578 4.16,0 7.78,-0.75 10.88,-2.2578 3.13,-1.5039 5.72,-3.5352 7.8,-6.0781 2.07,-2.543 3.62,-5.4688 4.66,-8.7813 1.03,-3.3047 1.55,-6.789 1.55,-10.4492 0,-3.6602 -0.52,-7.1406 -1.55,-10.4414 -1.04,-3.3164 -2.59,-6.2305 -4.66,-8.7461 -2.08,-2.5195 -4.67,-4.5312 -7.8,-6.0391 -3.1,-1.5039 -6.72,-2.2656 -10.88,-2.2656 -4.15,0 -7.8,0.7617 -10.94,2.2656 -3.13,1.5079 -5.73,3.5196 -7.8,6.0391 -2.08,2.5156 -3.65,5.4297 -4.68,8.7461 -1.03,3.3008 -1.55,6.7812 -1.55,10.4414 0,3.6602 0.52,7.1445 1.55,10.4492"
|
||||
id="path3100"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1681.55,81.2695 30.82,-44.7617 0.15,0 0,44.7617 5.04,0 0,-52.914 -5.64,0 -30.82,44.7578 -0.15,0 0,-44.7578 -5.03,0 0,52.914 5.63,0"
|
||||
id="path3102"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1775.58,55.332 c 3.51,0 6.35,0.8946 8.52,2.6719 2.17,1.7774 3.26,4.4922 3.26,8.1484 0,3.6602 -1.09,6.3711 -3.26,8.1485 -2.17,1.7851 -5.01,2.6758 -8.52,2.6758 l -17.35,0 0,-21.6446 17.35,0 z m 1.12,25.9375 c 2.36,0 4.51,-0.3359 6.43,-0.9961 1.94,-0.6679 3.58,-1.6562 4.99,-2.9648 1.37,-1.3125 2.43,-2.9063 3.17,-4.7852 0.74,-1.875 1.11,-4.0039 1.11,-6.3711 0,-2.3671 -0.37,-4.4921 -1.11,-6.371 -0.74,-1.8829 -1.8,-3.4766 -3.17,-4.7852 -1.41,-1.3047 -3.05,-2.293 -4.99,-2.9609 -1.92,-0.6641 -4.07,-0.9961 -6.43,-0.9961 l -18.47,0 0,-22.6836 -5.04,0 0,52.914 23.51,0"
|
||||
id="path3104"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1824.93,49.1055 -10.22,27.125 -10.6,-27.125 20.82,0 z m -7.19,32.164 20.76,-52.914 -5.41,0 -6.45,16.457 -24.09,0 -6.38,-16.457 -5.33,0 21.28,52.914 5.62,0"
|
||||
id="path3106"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1876.73,81.2695 0,-4.2929 -18.45,0 0,-48.6211 -5.04,0 0,48.6211 -18.38,0 0,4.2929 41.87,0"
|
||||
id="path3108"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1888.96,81.2695 0,-22.9687 31.41,0 0,22.9687 5.04,0 0,-52.914 -5.04,0 0,25.6445 -31.41,0 0,-25.6445 -5.04,0 0,52.914 5.04,0"
|
||||
id="path3110"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 1938.38,81.2695 12.01,-46.3125 0.15,0 12.9,46.3125 6.3,0 12.96,-46.3125 0.15,0 12.07,46.3125 5.04,0 -14.59,-52.914 -5.34,0 -13.42,47.3515 -0.14,0 -13.34,-47.3515 -5.48,0 -14.67,52.914 5.4,0"
|
||||
id="path3112"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 2034.73,49.1055 -10.24,27.125 -10.59,-27.125 20.83,0 z m -7.2,32.164 20.75,-52.914 -5.41,0 -6.44,16.457 -24.09,0 -6.37,-16.457 -5.34,0 21.27,52.914 5.63,0"
|
||||
id="path3114"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 2043.84,81.2695 5.93,0 17.41,-26.8281 17.33,26.8281 6.01,0 -20.9,-31.125 0,-21.789 -5.04,0 0,21.789 -20.74,31.125"
|
||||
id="path3116"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 2144.01,56.1484 c 1.55,0 3,0.2383 4.38,0.7071 1.39,0.4687 2.6,1.1484 3.63,2.0351 1.05,0.8907 1.88,1.9688 2.48,3.2227 0.62,1.2617 0.93,2.7109 0.93,4.332 0,3.2656 -0.94,5.836 -2.81,7.7031 -1.89,1.8829 -4.75,2.8282 -8.61,2.8282 l -18.81,0 0,-20.8282 18.81,0 z m 0.38,25.1211 c 2.18,0 4.23,-0.2695 6.19,-0.8203 1.95,-0.539 3.66,-1.3633 5.1,-2.4765 1.47,-1.1133 2.63,-2.543 3.5,-4.3008 0.86,-1.7578 1.29,-3.8125 1.29,-6.1875 0,-3.3594 -0.86,-6.2696 -2.59,-8.7461 -1.73,-2.4688 -4.3,-4.0469 -7.7,-4.7383 l 0,-0.1484 c 1.72,-0.25 3.15,-0.7032 4.28,-1.3672 1.15,-0.668 2.07,-1.5235 2.79,-2.5586 0.72,-1.0391 1.24,-2.2344 1.55,-3.5977 0.32,-1.3593 0.54,-2.8281 0.63,-4.4062 0.05,-0.8867 0.1,-1.9766 0.15,-3.2656 0.05,-1.2852 0.15,-2.5782 0.29,-3.8868 0.15,-1.3125 0.39,-2.5468 0.71,-3.707 0.33,-1.1562 0.76,-2.0586 1.3,-2.707 l -5.55,0 c -0.3,0.5 -0.54,1.1015 -0.71,1.8203 -0.17,0.7187 -0.3,1.4531 -0.38,2.2265 -0.07,0.7618 -0.13,1.5118 -0.18,2.25 -0.04,0.7461 -0.1,1.3946 -0.15,1.9297 -0.1,1.875 -0.26,3.7461 -0.49,5.6016 -0.22,1.8555 -0.68,3.5 -1.39,4.9609 -0.73,1.4532 -1.76,2.6289 -3.13,3.5235 -1.35,0.8906 -3.21,1.2812 -5.59,1.1875 l -19.11,0 0,-23.5 -5.04,0 0,52.914 24.23,0"
|
||||
id="path3118"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 2208.34,81.2695 0,-4.2929 -31.49,0 0,-19.1211 29.49,0 0,-4.3008 -29.49,0 0,-20.8945 31.87,0 0,-4.3047 -36.91,0 0,52.914 36.53,0"
|
||||
id="path3120"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 2221.6,38.9609 c 0.9,-1.8281 2.13,-3.289 3.66,-4.375 1.58,-1.0898 3.4,-1.8671 5.53,-2.3359 2.12,-0.4727 4.41,-0.7031 6.82,-0.7031 1.38,0 2.9,0.1914 4.53,0.5976 1.62,0.3907 3.13,1.0196 4.55,1.8828 1.41,0.8711 2.58,1.9727 3.52,3.336 0.94,1.3515 1.4,3.0039 1.4,4.9258 0,1.4843 -0.32,2.7695 -0.99,3.8593 -0.66,1.086 -1.55,1.9961 -2.59,2.7383 -1.06,0.7422 -2.24,1.3438 -3.53,1.8164 -1.28,0.4688 -2.54,0.8555 -3.78,1.1524 l -11.77,2.8789 c -1.55,0.4062 -3.03,0.8906 -4.5,1.4922 -1.45,0.5898 -2.73,1.375 -3.82,2.3672 -1.08,0.9843 -1.95,2.2031 -2.62,3.6289 -0.66,1.4336 -1,3.1875 -1,5.2617 0,1.289 0.24,2.7929 0.74,4.5234 0.5,1.7266 1.42,3.3594 2.78,4.8906 1.36,1.5274 3.23,2.8243 5.59,3.8907 2.38,1.0625 5.41,1.5898 9.12,1.5898 2.61,0 5.11,-0.3398 7.48,-1.0351 2.37,-0.6915 4.46,-1.7305 6.24,-3.1133 1.77,-1.3789 3.19,-3.0977 4.24,-5.1446 1.08,-2.0546 1.6,-4.4453 1.6,-7.1601 l -5.03,0 c -0.1,2.0351 -0.55,3.7969 -1.38,5.3047 -0.81,1.5039 -1.87,2.7617 -3.18,3.7773 -1.31,1.0117 -2.82,1.7852 -4.53,2.3008 -1.71,0.5195 -3.48,0.7773 -5.37,0.7773 -1.73,0 -3.4,-0.1914 -5.01,-0.5586 -1.6,-0.371 -3,-0.9609 -4.21,-1.7812 -1.21,-0.8164 -2.19,-1.8906 -2.94,-3.2227 -0.74,-1.332 -1.1,-2.9882 -1.1,-4.9648 0,-1.2305 0.21,-2.3086 0.63,-3.2227 0.43,-0.914 0.99,-1.6953 1.73,-2.332 0.75,-0.6484 1.62,-1.1601 2.56,-1.5547 0.97,-0.4023 1.99,-0.7226 3.08,-0.9687 l 12.9,-3.1875 c 1.87,-0.4883 3.64,-1.0938 5.3,-1.8164 1.65,-0.711 3.11,-1.6055 4.37,-2.6602 1.26,-1.0625 2.24,-2.3633 2.97,-3.8945 0.71,-1.5313 1.07,-3.3867 1.07,-5.5586 0,-0.5899 -0.07,-1.3828 -0.2,-2.3672 -0.11,-0.9922 -0.41,-2.0313 -0.87,-3.1523 -0.48,-1.1133 -1.15,-2.2344 -2.01,-3.3711 -0.86,-1.1368 -2.05,-2.1602 -3.55,-3.0782 -1.5,-0.9062 -3.37,-1.6562 -5.6,-2.2226 -2.22,-0.5586 -4.89,-0.8516 -8.01,-0.8516 -3.11,0 -5.99,0.3594 -8.67,1.0781 -2.66,0.7188 -4.94,1.8125 -6.8,3.2969 -1.89,1.4844 -3.33,3.3789 -4.36,5.707 -0.99,2.3204 -1.43,5.1055 -1.29,8.375 l 5.04,0 c -0.05,-2.7148 0.38,-4.9921 1.26,-6.8164"
|
||||
id="path3122"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 2270.24,46.3281 c 0.79,-2.789 1.96,-5.2812 3.57,-7.4804 1.6,-2.1993 3.67,-3.9688 6.18,-5.2969 2.52,-1.3399 5.54,-2.0039 9.04,-2.0039 3.51,0 6.52,0.664 9.02,2.0039 2.49,1.3281 4.54,3.0976 6.15,5.2969 1.6,2.1992 2.77,4.6914 3.55,7.4804 0.77,2.793 1.15,5.6211 1.15,8.4844 0,2.918 -0.38,5.7578 -1.15,8.5273 -0.78,2.7618 -1.95,5.2461 -3.55,7.4454 -1.61,2.2031 -3.66,3.9648 -6.15,5.3007 -2.5,1.3321 -5.51,2 -9.02,2 -3.5,0 -6.52,-0.6679 -9.04,-2 -2.51,-1.3359 -4.58,-3.0976 -6.18,-5.3007 -1.61,-2.1993 -2.78,-4.6836 -3.57,-7.4454 -0.75,-2.7695 -1.13,-5.6093 -1.13,-8.5273 0,-2.8633 0.38,-5.6914 1.13,-8.4844 z m -4.62,18.9336 c 1.04,3.3125 2.59,6.2383 4.66,8.7813 2.09,2.5429 4.68,4.5742 7.83,6.0781 3.14,1.5078 6.78,2.2578 10.92,2.2578 4.15,0 7.8,-0.75 10.9,-2.2578 3.11,-1.5039 5.7,-3.5352 7.79,-6.0781 2.07,-2.543 3.63,-5.4688 4.66,-8.7813 1.04,-3.3047 1.56,-6.789 1.56,-10.4492 0,-3.6602 -0.52,-7.1406 -1.56,-10.4414 -1.03,-3.3164 -2.59,-6.2305 -4.66,-8.7461 -2.09,-2.5195 -4.68,-4.5312 -7.79,-6.0391 -3.1,-1.5039 -6.75,-2.2656 -10.9,-2.2656 -4.14,0 -7.78,0.7617 -10.92,2.2656 -3.15,1.5079 -5.74,3.5196 -7.83,6.0391 -2.07,2.5156 -3.62,5.4297 -4.66,8.7461 -1.04,3.3008 -1.56,6.7812 -1.56,10.4414 0,3.6602 0.52,7.1445 1.56,10.4492"
|
||||
id="path3124"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 2328.53,81.2695 0,-32.7539 c 0,-3.0625 0.35,-5.664 1.03,-7.8242 0.69,-2.1406 1.71,-3.8984 3.05,-5.2578 1.33,-1.3555 2.97,-2.3477 4.88,-2.9648 1.93,-0.6172 4.11,-0.9219 6.52,-0.9219 2.47,0 4.67,0.3047 6.61,0.9219 1.93,0.6171 3.55,1.6093 4.88,2.9648 1.34,1.3594 2.35,3.1172 3.04,5.2578 0.69,2.1602 1.04,4.7617 1.04,7.8242 l 0,32.7539 5.04,0 0,-33.8672 c 0,-2.7148 -0.38,-5.2968 -1.14,-7.7382 -0.77,-2.4493 -1.99,-4.5899 -3.65,-6.418 -1.66,-1.8242 -3.76,-3.2695 -6.36,-4.332 -2.6,-1.0586 -5.74,-1.5938 -9.46,-1.5938 -3.65,0 -6.77,0.5352 -9.37,1.5938 -2.59,1.0625 -4.71,2.5078 -6.37,4.332 -1.66,1.8281 -2.87,3.9687 -3.63,6.418 -0.77,2.4414 -1.13,5.0234 -1.13,7.7382 l 0,33.8672 5.02,0"
|
||||
id="path3126"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 2400.87,56.1484 c 1.51,0 2.99,0.2383 4.36,0.7071 1.39,0.4687 2.59,1.1484 3.63,2.0351 1.03,0.8907 1.86,1.9688 2.49,3.2227 0.62,1.2617 0.92,2.7109 0.92,4.332 0,3.2656 -0.94,5.836 -2.82,7.7031 -1.86,1.8829 -4.73,2.8282 -8.58,2.8282 l -18.83,0 0,-20.8282 18.83,0 z m 0.36,25.1211 c 2.18,0 4.23,-0.2695 6.2,-0.8203 1.94,-0.539 3.64,-1.3633 5.11,-2.4765 1.45,-1.1133 2.61,-2.543 3.47,-4.3008 0.88,-1.7578 1.29,-3.8125 1.29,-6.1875 0,-3.3594 -0.85,-6.2696 -2.58,-8.7461 -1.73,-2.4688 -4.29,-4.0469 -7.71,-4.7383 l 0,-0.1484 c 1.73,-0.25 3.17,-0.7032 4.31,-1.3672 1.11,-0.668 2.05,-1.5235 2.77,-2.5586 0.71,-1.0391 1.23,-2.2344 1.55,-3.5977 0.32,-1.3593 0.52,-2.8281 0.63,-4.4062 0.05,-0.8867 0.11,-1.9766 0.16,-3.2656 0.04,-1.2852 0.15,-2.5782 0.29,-3.8868 0.14,-1.3125 0.38,-2.5468 0.7,-3.707 0.31,-1.1562 0.75,-2.0586 1.3,-2.707 l -5.56,0 c -0.29,0.5 -0.53,1.1015 -0.71,1.8203 -0.16,0.7187 -0.29,1.4531 -0.36,2.2265 -0.09,0.7618 -0.14,1.5118 -0.19,2.25 -0.05,0.7461 -0.1,1.3946 -0.15,1.9297 -0.09,1.875 -0.27,3.7461 -0.47,5.6016 -0.23,1.8555 -0.69,3.5 -1.42,4.9609 -0.71,1.4532 -1.75,2.6289 -3.1,3.5235 -1.37,0.8906 -3.23,1.2812 -5.6,1.1875 l -19.12,0 0,-23.5 -5.04,0 0,52.914 24.23,0"
|
||||
id="path3128"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 2465.16,70.7109 c -1.02,1.6094 -2.27,2.9493 -3.71,4.0391 -1.47,1.0859 -3.08,1.9141 -4.82,2.4844 -1.75,0.5625 -3.59,0.8515 -5.53,0.8515 -3.5,0 -6.51,-0.6679 -9.03,-2 -2.52,-1.3359 -4.6,-3.0976 -6.18,-5.3007 -1.62,-2.1993 -2.8,-4.6836 -3.58,-7.4454 -0.76,-2.7695 -1.14,-5.6093 -1.14,-8.5273 0,-2.8633 0.38,-5.6914 1.14,-8.4844 0.78,-2.789 1.96,-5.2812 3.58,-7.4804 1.58,-2.1993 3.66,-3.9688 6.18,-5.2969 2.52,-1.3399 5.53,-2.0039 9.03,-2.0039 2.47,0 4.7,0.4453 6.66,1.332 2,0.8906 3.7,2.1016 5.13,3.6289 1.44,1.5274 2.6,3.3281 3.48,5.3711 0.9,2.0508 1.46,4.2695 1.71,6.6367 l 5.04,0 c -0.35,-3.2578 -1.13,-6.1953 -2.3,-8.8203 -1.19,-2.6172 -2.72,-4.8359 -4.59,-6.668 -1.88,-1.8281 -4.09,-3.2304 -6.63,-4.2226 -2.56,-0.9844 -5.37,-1.4844 -8.5,-1.4844 -4.15,0 -7.79,0.7617 -10.93,2.2656 -3.13,1.5079 -5.74,3.5196 -7.83,6.0391 -2.05,2.5156 -3.62,5.4297 -4.65,8.7461 -1.04,3.3008 -1.56,6.7812 -1.56,10.4414 0,3.6602 0.52,7.1445 1.56,10.4492 1.03,3.3125 2.6,6.2383 4.65,8.7813 2.09,2.5429 4.7,4.5742 7.83,6.0781 3.14,1.5078 6.78,2.2578 10.93,2.2578 2.52,0 4.97,-0.3711 7.38,-1.1094 2.39,-0.7382 4.57,-1.8398 6.52,-3.2968 1.95,-1.461 3.57,-3.25 4.89,-5.3711 1.31,-2.1289 2.14,-4.5703 2.48,-7.3399 l -5.04,0 c -0.45,2.0274 -1.17,3.8399 -2.17,5.4492"
|
||||
id="path3130"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 2519.59,81.2695 0,-4.2929 -31.5,0 0,-19.1211 29.5,0 0,-4.3008 -29.5,0 0,-20.8945 31.86,0 0,-4.3047 -36.9,0 0,52.914 36.54,0"
|
||||
id="path3132"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 109.109,130.879 c -2.171,6.246 -5.242,11.781 -9.2145,16.601 -3.9765,4.825 -8.8007,8.7 -14.4765,11.637 -5.6758,2.938 -12.1133,4.395 -19.2969,4.395 -7.375,0 -13.9023,-1.457 -19.5781,-4.395 -5.6797,-2.937 -10.5039,-6.812 -14.4766,-11.637 -3.9726,-4.82 -7.1406,-10.41 -9.5078,-16.75 -2.3633,-6.332 -3.9258,-12.816 -4.6758,-19.433 l 94.7732,0 c -0.179,6.812 -1.371,13.348 -3.547,19.582 z M 20.5703,76.2539 c 1.8008,-6.9141 4.6875,-13.1055 8.6563,-18.5937 3.9765,-5.4883 8.9922,-10.0235 15.0429,-13.6133 6.0547,-3.6016 13.336,-5.3985 21.8516,-5.3985 13.0586,0 23.2734,3.4102 30.6484,10.2188 7.3755,6.8008 12.4885,15.8867 15.3205,27.2344 l 17.883,0 C 126.184,59.457 119.234,46.5898 109.109,37.5195 98.9922,28.4258 84.6602,23.8906 66.1211,23.8906 c -11.5352,0 -21.5234,2.043 -29.9336,6.1133 C 27.7578,34.0664 20.9023,39.6445 15.6094,46.7422 10.3125,53.8281 6.39063,62.0586 3.83203,71.4336 1.28125,80.7891 0,90.6719 0,101.086 c 0,9.641 1.28125,19.098 3.83203,28.371 2.5586,9.27 6.48047,17.551 11.77737,24.828 5.2929,7.285 12.1484,13.153 20.5781,17.606 8.4102,4.437 18.3984,6.664 29.9336,6.664 11.7266,0 21.7539,-2.375 30.0859,-7.102 8.32,-4.719 15.078,-10.922 20.285,-18.578 5.196,-7.66 8.938,-16.461 11.203,-26.395 2.278,-9.937 3.219,-20 2.84,-30.2222 l -112.6522,0 c 0,-6.4336 0.8867,-13.1055 2.6875,-20.0039"
|
||||
id="path3134"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 157.859,174.297 0,-25.258 0.571,0 c 3.41,8.895 9.457,16.039 18.164,21.426 8.695,5.398 18.254,8.09 28.66,8.09 10.219,0 18.777,-1.325 25.68,-3.977 6.91,-2.648 12.441,-6.379 16.601,-11.203 4.16,-4.824 7.098,-10.738 8.797,-17.734 1.699,-7.008 2.555,-14.864 2.555,-23.555 l 0,-94.2188 -17.875,0 0,91.3708 c 0,6.246 -0.567,12.059 -1.703,17.461 -1.141,5.387 -3.121,10.067 -5.957,14.047 -2.844,3.969 -6.676,7.09 -11.5,9.359 -4.821,2.274 -10.829,3.407 -18.02,3.407 -7.191,0 -13.578,-1.278 -19.152,-3.828 -5.578,-2.555 -10.309,-6.055 -14.192,-10.5 -3.879,-4.442 -6.91,-9.743 -9.082,-15.895 -2.172,-6.156 -3.355,-12.82 -3.547,-20.004 l 0,-85.4178 -17.871,0 0,146.4298 17.871,0"
|
||||
id="path3136"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 274.938,174.297 45.976,-128.5509 0.566,0 45.407,128.5509 18.449,0 -54.77,-146.4298 -19.019,0 -56.465,146.4298 19.856,0"
|
||||
id="path3138"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 409.707,174.297 0,-146.4298 -17.875,0 0,146.4298 17.875,0 z m 0,56.187 0,-28.664 -17.875,0 0,28.664 17.875,0"
|
||||
id="path3140"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 487.02,104.863 c 3.683,0 7.222,0.27 10.632,0.809 3.407,0.547 6.403,1.601 8.993,3.172 2.589,1.566 4.668,3.785 6.238,6.648 1.558,2.852 2.347,6.613 2.347,11.235 0,4.636 -0.789,8.39 -2.347,11.25 -1.57,2.859 -3.649,5.074 -6.238,6.64 -2.59,1.571 -5.586,2.629 -8.993,3.172 -3.41,0.547 -6.949,0.813 -10.632,0.813 l -24.934,0 0,-43.739 24.934,0 z m 8.793,68.68 c 9.128,0 16.898,-1.32 23.304,-3.988 6.406,-2.66 11.613,-6.16 15.633,-10.524 4.023,-4.359 6.961,-9.34 8.797,-14.922 1.84,-5.589 2.758,-11.379 2.758,-17.382 0,-5.852 -0.918,-11.614 -2.758,-17.27 -1.836,-5.652 -4.774,-10.664 -8.797,-15.0273 -4.02,-4.3633 -9.227,-7.8672 -15.633,-10.5234 -6.406,-2.6602 -14.176,-3.9844 -23.304,-3.9844 l -33.727,0 0,-52.336 -32.102,0 0,145.9571 65.829,0"
|
||||
id="path3142"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 614.273,76.7539 c -1.835,-0.6211 -3.812,-1.1289 -5.929,-1.5351 -2.114,-0.4102 -4.324,-0.75 -6.641,-1.0196 -2.316,-0.2773 -4.637,-0.6211 -6.953,-1.0273 -2.176,-0.4102 -4.328,-0.9571 -6.437,-1.6328 -2.114,-0.6836 -3.958,-1.6016 -5.52,-2.7618 -1.566,-1.1562 -2.832,-2.625 -3.777,-4.3945 -0.957,-1.7773 -1.438,-4.0234 -1.438,-6.7461 0,-2.5937 0.481,-4.7695 1.438,-6.5429 0.945,-1.7696 2.246,-3.168 3.879,-4.1915 1.636,-1.0234 3.543,-1.7382 5.726,-2.1484 2.176,-0.4101 4.426,-0.6094 6.746,-0.6094 5.723,0 10.145,0.9532 13.285,2.8672 3.133,1.9024 5.45,4.1875 6.953,6.8438 1.497,2.6562 2.418,5.3476 2.758,8.0742 0.336,2.7226 0.516,4.9101 0.516,6.543 l 0,10.8398 c -1.234,-1.1055 -2.766,-1.9492 -4.606,-2.5586 z m -57.339,40.9841 c 3,4.496 6.812,8.106 11.449,10.832 4.637,2.723 9.844,4.672 15.637,5.828 5.789,1.157 11.617,1.735 17.48,1.735 5.316,0 10.691,-0.375 16.145,-1.125 5.457,-0.75 10.425,-2.211 14.921,-4.395 4.504,-2.175 8.18,-5.207 11.043,-9.093 2.86,-3.883 4.297,-9.032 4.297,-15.434 l 0,-54.9922 c 0,-4.7774 0.27,-9.336 0.817,-13.6954 0.539,-4.3632 1.496,-7.6328 2.859,-9.8125 l -29.437,0 c -0.543,1.6329 -0.989,3.3008 -1.329,5.0118 -0.343,1.6992 -0.582,3.4414 -0.718,5.2109 -4.633,-4.7734 -10.082,-8.1133 -16.348,-10.0156 -6.273,-1.9063 -12.676,-2.8672 -19.219,-2.8672 -5.043,0 -9.746,0.6172 -14.105,1.8398 -4.367,1.2266 -8.18,3.1367 -11.449,5.7305 -3.27,2.5859 -5.825,5.8594 -7.668,9.8125 -1.84,3.9492 -2.758,8.6523 -2.758,14.1016 0,5.9961 1.058,10.9375 3.168,14.8242 2.109,3.8789 4.836,6.9726 8.179,9.2969 3.34,2.3164 7.157,4.0585 11.446,5.2148 4.297,1.1562 8.617,2.0742 12.98,2.7539 4.364,0.6836 8.656,1.2266 12.879,1.6367 4.227,0.4102 7.973,1.0235 11.25,1.8438 3.266,0.8125 5.856,2.0117 7.762,3.582 1.91,1.5664 2.793,3.8477 2.664,6.8435 0,3.137 -0.516,5.617 -1.535,7.461 -1.028,1.844 -2.395,3.266 -4.09,4.293 -1.707,1.02 -3.68,1.699 -5.93,2.039 -2.25,0.344 -4.672,0.516 -7.258,0.516 -5.718,0 -10.222,-1.223 -13.488,-3.68 -3.277,-2.453 -5.183,-6.543 -5.726,-12.265 l -29.032,0 c 
0.41,6.816 2.118,12.46 5.114,16.968"
|
||||
id="path3144"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 720.172,133.277 0,-19.422 -21.254,0 0,-52.3355 c 0,-4.9023 0.812,-8.1718 2.449,-9.8047 1.637,-1.6406 4.903,-2.4609 9.817,-2.4609 1.632,0 3.195,0.0703 4.699,0.2031 1.496,0.1328 2.926,0.3438 4.289,0.6172 l 0,-22.4883 c -2.453,-0.414 -5.176,-0.6836 -8.176,-0.8203 -2.996,-0.1289 -5.926,-0.1992 -8.789,-0.1992 -4.5,0 -8.754,0.3047 -12.773,0.918 -4.024,0.6094 -7.563,1.8086 -10.629,3.5781 -3.071,1.7695 -5.489,4.293 -7.254,7.5586 -1.781,3.2773 -2.664,7.5703 -2.664,12.8828 l 0,62.3511 -17.578,0 0,19.422 17.578,0 0,31.684 29.031,0 0,-31.684 21.254,0"
|
||||
id="path3146"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 755.918,173.543 0,-54.984 0.613,0 c 3.684,6.125 8.387,10.586 14.11,13.379 5.722,2.796 11.312,4.195 16.761,4.195 7.77,0 14.137,-1.059 19.114,-3.164 4.972,-2.114 8.894,-5.043 11.754,-8.793 2.863,-3.75 4.871,-8.317 6.031,-13.699 1.152,-5.383 1.734,-11.3481 1.734,-17.8832 l 0,-65.0079 -29.027,0 0,59.6954 c 0,8.7148 -1.363,15.2227 -4.086,19.5157 -2.731,4.293 -7.567,6.433 -14.516,6.433 -7.902,0 -13.633,-2.343 -17.172,-7.039 -3.543,-4.703 -5.316,-12.441 -5.316,-23.2144 l 0,-55.3907 -29.023,0 0,145.9571 29.023,0"
|
||||
id="path3148"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /><path
|
||||
d="m 863.082,0 21.875,0 0,190.547 -21.875,0 0,-190.547 z"
|
||||
id="path3150"
|
||||
style="fill:#090c0d;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g></g></svg>
|
||||
|
Before Width: | Height: | Size: 41 KiB |
9
static/images/logo-mission.svg
Normal file
|
After Width: | Height: | Size: 22 KiB |
3
static/images/logo-name.svg
Normal file
@ -0,0 +1,3 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 104 26" role="img" fill="currentColor">
|
||||
<path id="ep-logo-name" d="M13.638 12.451a6.613 6.613 0 0 0-1.152-2.075 5.687 5.687 0 0 0-1.81-1.455c-.708-.369-1.513-.55-2.411-.55-.922 0-1.739.181-2.447.55a5.702 5.702 0 0 0-1.81 1.455 7.21 7.21 0 0 0-1.188 2.092 10.28 10.28 0 0 0-.586 2.43h11.848a8.013 8.013 0 0 0-.444-2.447zM2.571 19.277a6.885 6.885 0 0 0 1.082 2.324 6.187 6.187 0 0 0 1.88 1.702c.757.452 1.667.676 2.732.676 1.633 0 2.91-.427 3.83-1.277.923-.852 1.563-1.987 1.917-3.405h2.234c-.474 2.08-1.342 3.689-2.608 4.824-1.264 1.135-3.056 1.702-5.373 1.702-1.442 0-2.69-.254-3.742-.765-1.053-.507-1.91-1.203-2.572-2.092C1.29 22.082.8 21.052.48 19.88A13.991 13.991 0 0 1 0 16.174c0-1.206.16-2.388.479-3.547a9.56 9.56 0 0 1 1.472-3.103c.662-.91 1.519-1.643 2.572-2.2 1.051-.554 2.3-.833 3.742-.833 1.466 0 2.72.296 3.76.887A7.487 7.487 0 0 1 14.562 9.7c.65.959 1.118 2.058 1.401 3.3.284 1.243.402 2.5.354 3.777H2.234c0 .806.113 1.638.337 2.5M19.732 7.024v3.156h.07c.428-1.113 1.184-2.004 2.271-2.678a6.654 6.654 0 0 1 3.584-1.01c1.277 0 2.346.163 3.21.495.862.332 1.555.799 2.075 1.402.52.603.887 1.342 1.1 2.216.212.877.32 1.858.32 2.945v11.777h-2.235v-11.42c0-.782-.071-1.51-.214-2.184-.142-.673-.39-1.26-.745-1.757a3.61 3.61 0 0 0-1.436-1.17c-.603-.283-1.354-.425-2.253-.425-.898 0-1.697.16-2.395.479a5.221 5.221 0 0 0-1.772 1.31 6.034 6.034 0 0 0-1.136 1.988 8.128 8.128 0 0 0-.444 2.5v10.679h-2.234V7.024h2.234M34.367 7.024l5.747 16.067h.071L45.86 7.024h2.307l-6.846 18.303h-2.378L31.885 7.024h2.482M51.214 7.024v18.303h-2.235V7.024h2.235zm0-7.024V3.58h-2.235V0h2.235M61.037 15.703c.46 0 .902-.034 1.33-.103.424-.068.799-.2 1.122-.395.325-.195.584-.474.78-.833.196-.356.294-.825.294-1.404 0-.578-.098-1.047-.294-1.406a2.162 2.162 0 0 0-.78-.83 3.104 3.104 0 0 0-1.123-.395 8.39 8.39 0 0 0-1.329-.103H57.92v5.469h3.117zm1.099-8.587c1.14 0 2.112.166 2.913.498.801.335 1.452.772 1.953 1.316.503.545.871 1.167 1.1 1.866a6.89 6.89 0 0 1 .346 2.172c0 .733-.115 1.453-.346 2.159a5.043 5.043 0 0 1-1.1 1.88c-.501.544-1.152.984-1.953 
1.316-.8.332-1.772.498-2.913.498H57.92v6.54h-4.013V7.116h8.229M76.944 19.216a5.26 5.26 0 0 1-.742.19c-.264.052-.54.096-.83.13-.29.034-.579.076-.87.127-.27.051-.54.12-.804.205-.263.085-.494.2-.69.344-.195.144-.354.33-.472.55-.12.222-.18.502-.18.844 0 .323.06.596.18.818.118.22.28.396.485.523.205.129.443.217.716.268.272.051.553.076.844.076.715 0 1.267-.117 1.66-.357.392-.239.68-.525.869-.857.187-.332.303-.668.344-1.008a6.93 6.93 0 0 0 .065-.818v-1.355a1.615 1.615 0 0 1-.575.32zm-7.168-5.124a4.345 4.345 0 0 1 1.43-1.353 6.257 6.257 0 0 1 1.956-.73 11.148 11.148 0 0 1 2.185-.215c.664 0 1.336.047 2.018.14a6.193 6.193 0 0 1 1.866.549 3.69 3.69 0 0 1 1.38 1.137c.356.486.537 1.128.537 1.93v6.874c0 .596.033 1.167.102 1.712.066.544.186.954.357 1.225h-3.68a5.23 5.23 0 0 1-.256-1.277 4.735 4.735 0 0 1-2.043 1.253 8.261 8.261 0 0 1-2.402.356c-.63 0-1.219-.076-1.763-.23a4.016 4.016 0 0 1-1.432-.715 3.34 3.34 0 0 1-.958-1.228c-.23-.493-.344-1.081-.344-1.762 0-.75.131-1.368.395-1.853a3.324 3.324 0 0 1 1.023-1.163 4.648 4.648 0 0 1 1.43-.651 15.515 15.515 0 0 1 1.623-.345 27.623 27.623 0 0 1 1.61-.202c.527-.052.996-.13 1.406-.232.408-.1.732-.252.97-.447.239-.195.349-.48.333-.857 0-.39-.065-.7-.192-.933a1.406 1.406 0 0 0-.511-.534 2.014 2.014 0 0 0-.741-.257 6.208 6.208 0 0 0-.907-.063c-.715 0-1.278.151-1.686.459-.41.308-.648.818-.716 1.533h-3.63c.052-.852.265-1.557.64-2.121M90.181 12.15v2.427h-2.657v6.543c0 .613.101 1.02.306 1.226.205.205.613.308 1.227.308.204 0 .399-.01.587-.027.188-.015.366-.042.537-.076v2.81a8.619 8.619 0 0 1-1.023.103c-.373.017-.74.024-1.098.024a10.41 10.41 0 0 1-1.597-.115 3.728 3.728 0 0 1-1.328-.446 2.356 2.356 0 0 1-.907-.945c-.222-.41-.333-.945-.333-1.61v-7.795h-2.198v-2.426h2.198V8.19h3.629v3.96h2.657M94.703 7.116v6.873h.075c.462-.764 1.05-1.323 1.764-1.672.716-.35 1.415-.523 2.096-.523.97 0 1.767.132 2.388.396.622.263 1.113.63 1.47 1.098.358.47.609 1.04.754 1.712.144.674.217 1.418.217 
2.236v8.125h-3.629v-7.46c0-1.09-.17-1.905-.511-2.44-.34-.537-.945-.805-1.814-.805-.988 0-1.704.293-2.146.88-.443.587-.664 1.556-.664 2.901v6.924h-3.628V7.116h3.628" class="logo-main"/>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 4.1 KiB |
3
static/images/logo-square.svg
Normal file
@ -0,0 +1,3 @@
|
||||
<svg fill="currentColor" id="ep-logo-square" role="img" viewBox="0 0 65 65" xmlns="http://www.w3.org/2000/svg">
|
||||
<path d="M26.538 25.283c-.532-1.519-1.274-2.861-2.24-4.037a11.163 11.163 0 0 0-3.522-2.828c-1.381-.712-2.948-1.07-4.697-1.07-1.791 0-3.379.358-4.76 1.07a11.11 11.11 0 0 0-3.522 2.828c-.966 1.176-1.737 2.533-2.308 4.07a19.839 19.839 0 0 0-1.139 4.728h23.047a15.559 15.559 0 0 0-.86-4.76zM5 38.57c.44 1.685 1.143 3.189 2.11 4.527.966 1.333 2.187 2.436 3.656 3.31 1.475.875 3.243 1.308 5.313 1.308 3.178 0 5.665-.83 7.456-2.485 1.793-1.65 3.037-3.867 3.726-6.626h4.35c-.922 4.054-2.613 7.179-5.073 9.39-2.46 2.208-5.947 3.315-10.46 3.315-2.802 0-5.233-.502-7.274-1.489-2.056-.986-3.721-2.348-5.01-4.072-1.284-1.724-2.241-3.725-2.861-6.005C.313 37.465 0 35.058 0 32.529c0-2.343.313-4.649.933-6.9.62-2.254 1.577-4.267 2.861-6.04 1.289-1.772 2.954-3.197 5.01-4.281 2.041-1.08 4.472-1.616 7.275-1.616 2.856 0 5.293.571 7.32 1.724 2.026 1.147 3.666 2.66 4.931 4.521 1.265 1.86 2.177 3.999 2.725 6.416a27.9 27.9 0 0 1 .692 7.348H4.35c0 1.567.215 3.188.65 4.868M49.301 31.456c.914 0 1.793-.07 2.638-.202.845-.136 1.586-.4 2.23-.78.641-.391 1.159-.942 1.548-1.656.387-.702.582-1.64.582-2.782 0-1.148-.195-2.08-.582-2.79-.39-.707-.907-1.26-1.547-1.65-.645-.386-1.386-.65-2.231-.78a16.554 16.554 0 0 0-2.638-.206h-6.176v10.846h6.176zm2.184-17.032c2.26 0 4.189.331 5.776.995 1.592.66 2.88 1.524 3.877 2.608a10.007 10.007 0 0 1 2.177 3.696c.46 1.393.684 2.828.684 4.313 0 1.455-.224 2.88-.684 4.28a9.955 9.955 0 0 1-2.177 3.727c-.997 1.079-2.285 1.953-3.877 2.607-1.587.664-3.516.992-5.776.992h-8.36v12.974h-7.964V14.424h16.324" class="logo-main"/>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 1.6 KiB |
BIN
static/images/uoa-logo-small.png
Executable file
|
After Width: | Height: | Size: 17 KiB |
|
Before Width: | Height: | Size: 91 KiB |
|
Before Width: | Height: | Size: 814 B |
378
static/js/alpine/components/schema-form.js
Normal file
@ -0,0 +1,378 @@
|
||||
/**
|
||||
* Alpine.js Schema Renderer Component
|
||||
*
|
||||
* Renders forms dynamically from JSON Schema with RJSF format support.
|
||||
* Supports uiSchema for widget hints, labels, help text, and field ordering.
|
||||
*
|
||||
* Usage:
|
||||
* <div x-data="schemaRenderer({
|
||||
* rjsf: { schema: {...}, uiSchema: {...}, formData: {...}, groups: [...] },
|
||||
* data: { interval: { start: 20, end: 25 } },
|
||||
* mode: 'view', // 'view' | 'edit'
|
||||
* endpoint: '/api/v1/scenario/{uuid}/information/temperature/'
|
||||
* })">
|
||||
*/
|
||||
document.addEventListener("alpine:init", () => {
|
||||
// Global validation error store with context scoping
|
||||
Alpine.store('validationErrors', {
|
||||
errors: {},
|
||||
|
||||
// Set errors for a specific context (UUID) or globally (no context)
|
||||
setErrors(errors, context = null) {
|
||||
if (context) {
|
||||
// Namespace all field names with context prefix
|
||||
const namespacedErrors = {};
|
||||
Object.entries(errors).forEach(([field, messages]) => {
|
||||
const key = `${context}.${field}`;
|
||||
namespacedErrors[key] = messages;
|
||||
});
|
||||
// Merge into existing errors (preserves other contexts)
|
||||
this.errors = { ...this.errors, ...namespacedErrors };
|
||||
} else {
|
||||
// No context - merge as-is for backward compatibility
|
||||
this.errors = { ...this.errors, ...errors };
|
||||
}
|
||||
},
|
||||
|
||||
// Clear errors for a specific context or all errors
|
||||
clearErrors(context = null) {
|
||||
if (context) {
|
||||
// Clear only errors for this context
|
||||
const newErrors = {};
|
||||
const prefix = `${context}.`;
|
||||
Object.keys(this.errors).forEach(key => {
|
||||
if (!key.startsWith(prefix)) {
|
||||
newErrors[key] = this.errors[key];
|
||||
}
|
||||
});
|
||||
this.errors = newErrors;
|
||||
} else {
|
||||
// Clear all errors
|
||||
this.errors = {};
|
||||
}
|
||||
},
|
||||
|
||||
// Clear a specific field, optionally within a context
|
||||
clearField(fieldName, context = null) {
|
||||
const key = context ? `${context}.${fieldName}` : fieldName;
|
||||
if (this.errors[key]) {
|
||||
delete this.errors[key];
|
||||
// Trigger reactivity by creating new object
|
||||
this.errors = { ...this.errors };
|
||||
}
|
||||
},
|
||||
|
||||
// Check if a field has errors, optionally within a context
|
||||
hasError(fieldName, context = null) {
|
||||
const key = context ? `${context}.${fieldName}` : fieldName;
|
||||
return Array.isArray(this.errors[key]) && this.errors[key].length > 0;
|
||||
},
|
||||
|
||||
// Get errors for a field, optionally within a context
|
||||
getErrors(fieldName, context = null) {
|
||||
const key = context ? `${context}.${fieldName}` : fieldName;
|
||||
return this.errors[key] || [];
|
||||
}
|
||||
});
|
||||
|
||||
Alpine.data("schemaRenderer", (options = {}) => ({
|
||||
schema: null,
|
||||
uiSchema: {},
|
||||
data: {},
|
||||
mode: options.mode || "view", // 'view' | 'edit'
|
||||
endpoint: options.endpoint || "",
|
||||
loading: false,
|
||||
error: null,
|
||||
context: options.context || null, // UUID for items, null for single forms
|
||||
debugErrors:
|
||||
options.debugErrors ??
|
||||
(typeof window !== "undefined" &&
|
||||
window.location?.search?.includes("debugErrors=1")),
|
||||
|
||||
async init() {
|
||||
if (options.schemaUrl) {
|
||||
try {
|
||||
this.loading = true;
|
||||
const res = await fetch(options.schemaUrl);
|
||||
if (!res.ok) {
|
||||
throw new Error(`Failed to load schema: ${res.statusText}`);
|
||||
}
|
||||
const rjsf = await res.json();
|
||||
|
||||
// RJSF format: {schema, uiSchema, formData, groups}
|
||||
if (!rjsf.schema) {
|
||||
throw new Error("Invalid RJSF format: missing schema property");
|
||||
}
|
||||
|
||||
this.schema = rjsf.schema;
|
||||
this.uiSchema = rjsf.uiSchema || {};
|
||||
this.data = options.data
|
||||
? JSON.parse(JSON.stringify(options.data))
|
||||
: rjsf.formData || {};
|
||||
} catch (err) {
|
||||
this.error = err.message;
|
||||
console.error("Error loading schema:", err);
|
||||
} finally {
|
||||
this.loading = false;
|
||||
}
|
||||
} else if (options.rjsf) {
|
||||
// Direct RJSF object passed
|
||||
if (!options.rjsf.schema) {
|
||||
throw new Error("Invalid RJSF format: missing schema property");
|
||||
}
|
||||
|
||||
this.schema = options.rjsf.schema;
|
||||
this.uiSchema = options.rjsf.uiSchema || {};
|
||||
this.data = options.data
|
||||
? JSON.parse(JSON.stringify(options.data))
|
||||
: options.rjsf.formData || {};
|
||||
}
|
||||
|
||||
// Initialize data from formData or options
|
||||
if (!this.data || Object.keys(this.data).length === 0) {
|
||||
this.data = {};
|
||||
}
|
||||
|
||||
// Ensure all schema fields are properly initialized
|
||||
if (this.schema && this.schema.properties) {
|
||||
for (const [key, propSchema] of Object.entries(
|
||||
this.schema.properties,
|
||||
)) {
|
||||
const widget = this.getWidget(key, propSchema);
|
||||
|
||||
if (widget === "interval") {
|
||||
// Ensure interval fields are objects with start/end
|
||||
if (!this.data[key] || typeof this.data[key] !== "object") {
|
||||
this.data[key] = { start: null, end: null };
|
||||
} else {
|
||||
// Ensure start and end exist
|
||||
if (this.data[key].start === undefined)
|
||||
this.data[key].start = null;
|
||||
if (this.data[key].end === undefined) this.data[key].end = null;
|
||||
}
|
||||
} else if (widget === "timeseries-table") {
|
||||
// Ensure timeseries fields are arrays
|
||||
if (!this.data[key] || !Array.isArray(this.data[key])) {
|
||||
this.data[key] = [];
|
||||
}
|
||||
} else if (this.data[key] === undefined) {
|
||||
// ONLY initialize if truly undefined, not just falsy
|
||||
// This preserves empty strings, null, 0, false as valid values
|
||||
if (propSchema.type === "boolean") {
|
||||
this.data[key] = false;
|
||||
} else if (
|
||||
propSchema.type === "number" ||
|
||||
propSchema.type === "integer"
|
||||
) {
|
||||
this.data[key] = null;
|
||||
} else if (propSchema.enum) {
|
||||
// For select fields, use null to show placeholder
|
||||
this.data[key] = null;
|
||||
} else {
|
||||
this.data[key] = "";
|
||||
}
|
||||
}
|
||||
// If data[key] exists (even if empty string or null), don't overwrite
|
||||
}
|
||||
}
|
||||
|
||||
// UX: Clear field errors when fields change (with context)
|
||||
if (this.mode === "edit" && this.schema?.properties) {
|
||||
Object.keys(this.schema.properties).forEach((key) => {
|
||||
this.$watch(
|
||||
`data.${key}`,
|
||||
() => {
|
||||
Alpine.store('validationErrors').clearField(key, this.context);
|
||||
},
|
||||
{ deep: true },
|
||||
);
|
||||
});
|
||||
}
|
||||
},
|
||||
|
||||
getWidget(fieldName, fieldSchema) {
|
||||
// Defensive check: ensure fieldSchema is provided
|
||||
if (!fieldSchema) return "text";
|
||||
|
||||
try {
|
||||
// Check uiSchema first (RJSF format)
|
||||
if (
|
||||
this.uiSchema &&
|
||||
this.uiSchema[fieldName] &&
|
||||
this.uiSchema[fieldName]["ui:widget"]
|
||||
) {
|
||||
return this.uiSchema[fieldName]["ui:widget"];
|
||||
}
|
||||
|
||||
// Check for interval type (object with start/end properties)
|
||||
if (
|
||||
fieldSchema.type === "object" &&
|
||||
fieldSchema.properties &&
|
||||
fieldSchema.properties.start &&
|
||||
fieldSchema.properties.end
|
||||
) {
|
||||
return "interval";
|
||||
}
|
||||
|
||||
// Check for measurements array type (timeseries-table widget)
|
||||
if (
|
||||
fieldSchema.type === "array" &&
|
||||
fieldSchema.items?.properties?.timestamp &&
|
||||
fieldSchema.items?.properties?.value
|
||||
) {
|
||||
return "timeseries-table";
|
||||
}
|
||||
|
||||
// Infer from JSON Schema type
|
||||
if (fieldSchema.enum) return "select";
|
||||
if (fieldSchema.type === "number" || fieldSchema.type === "integer")
|
||||
return "number";
|
||||
if (fieldSchema.type === "boolean") return "checkbox";
|
||||
return "text";
|
||||
} catch (e) {
|
||||
// Fallback to text widget if anything fails
|
||||
console.warn("Error in getWidget:", e);
|
||||
return "text";
|
||||
}
|
||||
},
|
||||
|
||||
getLabel(fieldName, fieldSchema) {
|
||||
// Check uiSchema (RJSF format)
|
||||
if (this.uiSchema[fieldName] && this.uiSchema[fieldName]["ui:label"]) {
|
||||
return this.uiSchema[fieldName]["ui:label"];
|
||||
}
|
||||
|
||||
// Default: format field name
|
||||
return fieldName
|
||||
.replace(/_/g, " ")
|
||||
.replace(/\b\w/g, (c) => c.toUpperCase());
|
||||
},
|
||||
|
||||
getFieldOrder() {
|
||||
try {
|
||||
// Get ordered list of field names based on ui:order
|
||||
if (!this.schema || !this.schema.properties) return [];
|
||||
|
||||
// Only include fields that have UI configs
|
||||
const fields = Object.keys(this.schema.properties).filter(
|
||||
(fieldName) => this.uiSchema && this.uiSchema[fieldName],
|
||||
);
|
||||
|
||||
// Sort by ui:order if available
|
||||
return fields.sort((a, b) => {
|
||||
const orderA = this.uiSchema[a]?.["ui:order"] || "999";
|
||||
const orderB = this.uiSchema[b]?.["ui:order"] || "999";
|
||||
return parseInt(orderA) - parseInt(orderB);
|
||||
});
|
||||
} catch (e) {
|
||||
// Return empty array if anything fails to prevent errors
|
||||
console.warn("Error in getFieldOrder:", e);
|
||||
return [];
|
||||
}
|
||||
},
|
||||
|
||||
hasTimeseriesField() {
|
||||
try {
|
||||
// Check if any field in the schema is a timeseries-table widget
|
||||
if (!this.schema || !this.schema.properties) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return Object.keys(this.schema.properties).some((fieldName) => {
|
||||
const fieldSchema = this.schema.properties[fieldName];
|
||||
if (!fieldSchema) return false;
|
||||
return this.getWidget(fieldName, fieldSchema) === "timeseries-table";
|
||||
});
|
||||
} catch (e) {
|
||||
// Return false if anything fails to prevent errors
|
||||
console.warn("Error in hasTimeseriesField:", e);
|
||||
return false;
|
||||
}
|
||||
},
|
||||
|
||||
async submit() {
|
||||
if (!this.endpoint) {
|
||||
console.error("No endpoint specified for submission");
|
||||
return;
|
||||
}
|
||||
|
||||
this.loading = true;
|
||||
this.error = null;
|
||||
|
||||
try {
|
||||
const csrftoken =
|
||||
document.querySelector("[name=csrf-token]")?.content || "";
|
||||
const res = await fetch(this.endpoint, {
|
||||
method: "POST",
|
||||
headers: {
|
||||
"Content-Type": "application/json",
|
||||
"X-CSRFToken": csrftoken,
|
||||
},
|
||||
body: JSON.stringify(this.data),
|
||||
});
|
||||
|
||||
if (!res.ok) {
|
||||
let errorData;
|
||||
try {
|
||||
errorData = await res.json();
|
||||
} catch {
|
||||
errorData = { error: res.statusText };
|
||||
}
|
||||
|
||||
// Handle validation errors (field-level)
|
||||
Alpine.store('validationErrors').clearErrors();
|
||||
|
||||
// Try to parse structured error response
|
||||
let parsedError = errorData;
|
||||
|
||||
// If error is a JSON string, parse it
|
||||
if (
|
||||
typeof errorData.error === "string" &&
|
||||
errorData.error.startsWith("{")
|
||||
) {
|
||||
parsedError = JSON.parse(errorData.error);
|
||||
}
|
||||
|
||||
if (parsedError.detail && Array.isArray(parsedError.detail)) {
|
||||
// Pydantic validation errors format: [{loc: ['field'], msg: '...', type: '...'}]
|
||||
const fieldErrors = {};
|
||||
for (const err of parsedError.detail) {
|
||||
const field =
|
||||
err.loc && err.loc.length > 0
|
||||
? err.loc[err.loc.length - 1]
|
||||
: "root";
|
||||
if (!fieldErrors[field]) {
|
||||
fieldErrors[field] = [];
|
||||
}
|
||||
fieldErrors[field].push(
|
||||
err.msg || err.message || "Validation error",
|
||||
);
|
||||
}
|
||||
Alpine.store('validationErrors').setErrors(fieldErrors);
|
||||
throw new Error(
|
||||
"Validation failed. Please check the fields below.",
|
||||
);
|
||||
} else {
|
||||
// General error
|
||||
throw new Error(
|
||||
parsedError.error ||
|
||||
parsedError.detail ||
|
||||
`Request failed: ${res.statusText}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Clear errors on success
|
||||
Alpine.store('validationErrors').clearErrors();
|
||||
|
||||
const result = await res.json();
|
||||
return result;
|
||||
} catch (err) {
|
||||
this.error = err.message;
|
||||
throw err;
|
||||
} finally {
|
||||
this.loading = false;
|
||||
}
|
||||
},
|
||||
}));
|
||||
});
|
||||
462
static/js/alpine/components/widgets.js
Normal file
@ -0,0 +1,462 @@
|
||||
/**
|
||||
* Alpine.js Widget Components for Schema Forms
|
||||
*
|
||||
* Centralized widget component definitions for dynamic form rendering.
|
||||
* Each widget receives explicit parameters instead of context object for better traceability.
|
||||
*/
|
||||
document.addEventListener("alpine:init", () => {
|
||||
// Base widget factory with common functionality
|
||||
// Shared widget state/behavior mixed into every concrete widget below.
// Receives explicit parameters (not a context object) for traceability.
const baseWidget = (
  fieldName,
  data,
  schema,
  uiSchema,
  mode,
  debugErrors,
  context = null // context for error namespacing (optional)
) => ({
  fieldName,
  data,
  schema,
  uiSchema,
  mode,
  debugErrors,
  context, // stored so templates can namespace validation errors

  // Schema fragment for this field (empty object when absent).
  get fieldSchema() {
    return this.schema?.properties?.[this.fieldName] || {};
  },

  // Display label: uiSchema "ui:label" > schema title > humanized name.
  get label() {
    const override = this.uiSchema?.[this.fieldName]?.["ui:label"];
    if (override) return override;

    const { title } = this.fieldSchema;
    if (title) return title;

    return this.fieldName
      .replace(/_/g, " ")
      .replace(/\b\w/g, (c) => c.toUpperCase());
  },

  // Help text shown under the field.
  get helpText() {
    return this.fieldSchema.description || "";
  },

  // Optional unit string from the uiSchema (RJSF "ui:unit").
  get unit() {
    return this.uiSchema?.[this.fieldName]?.["ui:unit"] || null;
  },

  // Render-mode helpers.
  get isViewMode() {
    return this.mode === "view";
  },
  get isEditMode() {
    return this.mode === "edit";
  },
});
|
||||
|
||||
// Plain single-line text input widget.
Alpine.data(
  "textWidget",
  (fieldName, data, schema, uiSchema, mode, debugErrors, context = null) => ({
    ...baseWidget(fieldName, data, schema, uiSchema, mode, debugErrors, context),

    // Two-way binding onto data[fieldName]; empty string when unset.
    get value() {
      return this.data[this.fieldName] || "";
    },
    set value(v) {
      this.data[this.fieldName] = v;
    },
  }),
);
|
||||
|
||||
// Multi-line textarea widget.
Alpine.data(
  "textareaWidget",
  (fieldName, data, schema, uiSchema, mode, debugErrors, context = null) => ({
    ...baseWidget(fieldName, data, schema, uiSchema, mode, debugErrors, context),

    // Two-way binding onto data[fieldName]; empty string when unset.
    get value() {
      return this.data[this.fieldName] || "";
    },
    set value(v) {
      this.data[this.fieldName] = v;
    },
  }),
);
|
||||
|
||||
// Numeric input widget with optional unit suffix in view mode.
Alpine.data(
  "numberWidget",
  (fieldName, data, schema, uiSchema, mode, debugErrors, context = null) => ({
    ...baseWidget(
      fieldName,
      data,
      schema,
      uiSchema,
      mode,
      debugErrors,
      context,
    ),

    get value() {
      return this.data[this.fieldName];
    },
    set value(v) {
      // ""/null clears the field to null; otherwise parse as float.
      // FIX: guard against NaN — previously parseFloat("abc") stored
      // NaN, which rendered as "NaN" and poisoned the JSON payload.
      if (v === "" || v === null) {
        this.data[this.fieldName] = null;
        return;
      }
      const n = parseFloat(v);
      this.data[this.fieldName] = Number.isNaN(n) ? null : n;
    },
    get hasValue() {
      return (
        this.value !== null && this.value !== undefined && this.value !== ""
      );
    },
    // Format value with unit for view mode ("—" when empty).
    get displayValue() {
      if (!this.hasValue) return "—";
      return this.unit ? `${this.value} ${this.unit}` : String(this.value);
    },
  }),
);
|
||||
|
||||
// Dropdown widget backed by the schema's enum values.
Alpine.data(
  "selectWidget",
  (fieldName, data, schema, uiSchema, mode, debugErrors, context = null) => ({
    ...baseWidget(fieldName, data, schema, uiSchema, mode, debugErrors, context),

    // Two-way binding onto data[fieldName]; empty string when unset.
    get value() {
      return this.data[this.fieldName] || "";
    },
    set value(v) {
      this.data[this.fieldName] = v;
    },

    // Choices come straight from the JSON Schema enum.
    get options() {
      return this.fieldSchema.enum || [];
    },
  }),
);
|
||||
|
||||
// Boolean checkbox widget.
Alpine.data(
  "checkboxWidget",
  (fieldName, data, schema, uiSchema, mode, debugErrors, context = null) => ({
    ...baseWidget(fieldName, data, schema, uiSchema, mode, debugErrors, context),

    // Coerce the stored value to a boolean for the input binding.
    get checked() {
      return !!this.data[this.fieldName];
    },
    set checked(v) {
      this.data[this.fieldName] = v;
    },
  }),
);
|
||||
|
||||
// Interval widget: edits a {start, end} sub-object and renders
// "start – end unit" (or one-sided ≥/≤ forms) in view mode.
Alpine.data(
  "intervalWidget",
  (fieldName, data, schema, uiSchema, mode, debugErrors, context = null) => ({
    ...baseWidget(
      fieldName,
      data,
      schema,
      uiSchema,
      mode,
      debugErrors,
      context,
    ),

    get start() {
      return this.data[this.fieldName]?.start ?? null;
    },
    set start(v) {
      if (!this.data[this.fieldName]) this.data[this.fieldName] = {};
      this.data[this.fieldName].start = this._toNumberOrNull(v);
    },
    get end() {
      return this.data[this.fieldName]?.end ?? null;
    },
    set end(v) {
      if (!this.data[this.fieldName]) this.data[this.fieldName] = {};
      this.data[this.fieldName].end = this._toNumberOrNull(v);
    },

    // Shared coercion: ""/null -> null, otherwise parseFloat.
    // FIX: guard against NaN — previously parseFloat of non-numeric
    // text stored NaN verbatim in the interval payload.
    _toNumberOrNull(v) {
      if (v === "" || v === null) return null;
      const n = parseFloat(v);
      return Number.isNaN(n) ? null : n;
    },

    // Format interval with unit for view mode.
    get displayValue() {
      const s = this.start,
        e = this.end;
      const unitStr = this.unit ? ` ${this.unit}` : "";

      if (s !== null && e !== null) return `${s} – ${e}${unitStr}`;
      if (s !== null) return `≥ ${s}${unitStr}`;
      if (e !== null) return `≤ ${e}${unitStr}`;
      return "—";
    },

    // True when the interval degenerates to a single value.
    get isSameValue() {
      return this.start !== null && this.start === this.end;
    },

    // Validation: start must be <= end (client-side, edit mode only).
    get hasValidationError() {
      if (this.isViewMode) return false;
      const s = this.start;
      const e = this.end;
      // Only validate if both values are provided
      if (
        s !== null &&
        e !== null &&
        typeof s === "number" &&
        typeof e === "number"
      ) {
        return s > e;
      }
      return false;
    },
  }),
);
|
||||
|
||||
// PubMed ID widget; view mode links to the article page.
Alpine.data(
  "pubmedWidget",
  (fieldName, data, schema, uiSchema, mode, debugErrors, context = null) => ({
    ...baseWidget(fieldName, data, schema, uiSchema, mode, debugErrors, context),

    // Two-way binding onto data[fieldName]; empty string when unset.
    get value() {
      return this.data[this.fieldName] || "";
    },
    set value(v) {
      this.data[this.fieldName] = v;
    },

    // Link target derived from the stored ID (null when empty).
    get pubmedUrl() {
      return this.value
        ? `https://pubmed.ncbi.nlm.nih.gov/${this.value}`
        : null;
    },
  }),
);
|
||||
|
||||
// Compound link widget (stores a compound reference as a string).
Alpine.data(
  "compoundWidget",
  (fieldName, data, schema, uiSchema, mode, debugErrors, context = null) => ({
    ...baseWidget(fieldName, data, schema, uiSchema, mode, debugErrors, context),

    // Two-way binding onto data[fieldName]; empty string when unset.
    get value() {
      return this.data[this.fieldName] || "";
    },
    set value(v) {
      this.data[this.fieldName] = v;
    },
  }),
);
|
||||
|
||||
// TimeSeries table widget: edits an array of {timestamp, value, error,
// note} measurements and, in view mode, renders them as a chart via the
// global window.TimeSeriesChart helper (timestamps are numeric days).
Alpine.data(
  "timeseriesTableWidget",
  (fieldName, data, schema, uiSchema, mode, debugErrors, context = null) => ({
    ...baseWidget(
      fieldName,
      data,
      schema,
      uiSchema,
      mode,
      debugErrors,
      context,
    ),

    // Handle returned by TimeSeriesChart.create; null when no chart.
    chartInstance: null,

    // Getter/setter for the measurements array stored at data[fieldName].
    get measurements() {
      return this.data[this.fieldName] || [];
    },
    set measurements(v) {
      this.data[this.fieldName] = v;
    },

    // Description from the sibling "description" field on the same data.
    get description() {
      return this.data?.description || "";
    },

    // Method from the sibling "method" field on the same data.
    get method() {
      return this.data?.method || "";
    },

    // Options object handed to TimeSeriesChart; axis labels/units come
    // from sibling fields with hard-coded fallbacks.
    get chartOptions() {
      return {
        measurements: this.measurements,
        xAxisLabel: this.data?.x_axis_label || "Time",
        yAxisLabel: this.data?.y_axis_label || "Value",
        xAxisUnit: this.data?.x_axis_unit || "",
        yAxisUnit: this.data?.y_axis_unit || "",
      };
    },

    // Append an empty measurement row (creates the array on first use).
    addMeasurement() {
      if (!this.data[this.fieldName]) {
        this.data[this.fieldName] = [];
      }
      this.data[this.fieldName].push({
        timestamp: null,
        value: null,
        error: null,
        note: "",
      });
    },

    // Remove the measurement row at `index` (no-op when absent).
    removeMeasurement(index) {
      if (
        this.data[this.fieldName] &&
        Array.isArray(this.data[this.fieldName])
      ) {
        this.data[this.fieldName].splice(index, 1);
      }
    },

    // Write one field of one measurement row. Numeric columns are
    // parsed; ""/null clears to null.
    // NOTE(review): parseFloat of non-numeric text stores NaN here —
    // same issue fixed in numberWidget; confirm inputs are type=number.
    updateMeasurement(index, field, value) {
      if (this.data[this.fieldName] && this.data[this.fieldName][index]) {
        if (field === "timestamp" || field === "value" || field === "error") {
          // Parse all numeric fields (timestamp is days as float)
          this.data[this.fieldName][index][field] =
            value === "" || value === null ? null : parseFloat(value);
        } else {
          // Store other fields as-is
          this.data[this.fieldName][index][field] = value;
        }
      }
    },

    // Display formatting for a timestamp (numeric days; "" when null).
    formatTimestamp(timestamp) {
      return timestamp ?? "";
    },

    // In-place ascending sort by timestamp; null timestamps sort last.
    sortByTimestamp() {
      if (
        this.data[this.fieldName] &&
        Array.isArray(this.data[this.fieldName])
      ) {
        this.data[this.fieldName].sort((a, b) => {
          const tsA = a.timestamp ?? Infinity;
          const tsB = b.timestamp ?? Infinity;
          return tsA - tsB;
        });
      }
    },

    // Build the chart (view mode only). Destroys any previous instance
    // first; skips entirely when there is nothing to plot.
    initChart() {
      if (!this.isViewMode || !window.Chart || !window.TimeSeriesChart)
        return;

      const canvas = this.$refs?.chartCanvas;
      if (!canvas) return;

      this.destroyChart();

      if (this.measurements.length === 0) return;

      this.chartInstance = window.TimeSeriesChart.create(
        canvas,
        this.chartOptions,
      );
    },

    // Push current measurements/options into an existing chart.
    updateChart() {
      if (!this.chartInstance || !this.isViewMode) return;
      window.TimeSeriesChart.update(
        this.chartInstance,
        this.measurements,
        this.chartOptions,
      );
    },

    // Tear down the chart instance and clear the handle.
    destroyChart() {
      if (this.chartInstance) {
        window.TimeSeriesChart.destroy(this.chartInstance);
        this.chartInstance = null;
      }
    },

    // Alpine lifecycle hook: create the chart after the DOM settles and
    // re-render it whenever the measurements array changes.
    init() {
      if (this.isViewMode && window.Chart) {
        // Use $nextTick to ensure DOM is ready
        this.$nextTick(() => {
          this.initChart();
        });

        // Watch measurements array for changes and update chart
        this.$watch("data." + this.fieldName, () => {
          if (this.chartInstance) {
            this.updateChart();
          }
        });
      }
    },
  }),
);
|
||||
});
|
||||
265
static/js/alpine/index.js
Normal file
@ -0,0 +1,265 @@
|
||||
/**
|
||||
* Alpine.js Components for enviPath
|
||||
*
|
||||
* This module provides reusable Alpine.js data components for modals,
|
||||
* form validation, and form submission.
|
||||
*/
|
||||
|
||||
document.addEventListener('alpine:init', () => {
|
||||
/**
|
||||
* Modal Form Component
|
||||
*
|
||||
* Provides form validation using HTML5 Constraint Validation API,
|
||||
* loading states for submission, and error message management.
|
||||
*
|
||||
* Basic Usage:
|
||||
* <dialog x-data="modalForm()" @close="reset()">
|
||||
* <form id="my-form">
|
||||
* <input name="field" required>
|
||||
* </form>
|
||||
* <button @click="submit('my-form')" :disabled="isSubmitting">Submit</button>
|
||||
* </dialog>
|
||||
*
|
||||
* With Custom State:
|
||||
* <dialog x-data="modalForm({ state: { selectedItem: '', imageUrl: '' } })" @close="reset()">
|
||||
* <select x-model="selectedItem" @change="updateImagePreview(selectedItem + '?image=svg')">
|
||||
* <img :src="imageUrl" x-show="imageUrl">
|
||||
* </dialog>
|
||||
*
|
||||
* With AJAX:
|
||||
* <button @click="submitAsync('my-form', { onSuccess: (data) => console.log(data) })">
|
||||
*/
|
||||
/*
 * Modal form component: HTML5 constraint-validation state, sync/AJAX
 * submission with a loading flag, and per-field error messages.
 * `options.state` is spread in as custom initial state and restored by
 * reset(); `options` stays captured in this factory closure.
 */
Alpine.data('modalForm', (options = {}) => ({
  isSubmitting: false,
  errors: {},
  // Spread custom initial state from options
  ...(options.state || {}),

  /**
   * Validate a single field using HTML5 Constraint Validation API
   * @param {HTMLElement} field - The input/select/textarea element
   */
  validateField(field) {
    const name = field.name || field.id;
    if (!name) return;

    if (!field.validity.valid) {
      this.errors[name] = field.validationMessage;
    } else {
      delete this.errors[name];
    }
  },

  /**
   * Clear error for a field (call on input)
   * @param {HTMLElement} field - The input element
   */
  clearError(field) {
    const name = field.name || field.id;
    if (name && this.errors[name]) {
      delete this.errors[name];
    }
  },

  /**
   * Get error message for a field
   * @param {string} name - Field name
   * @returns {string|undefined} Error message or undefined
   */
  getError(name) {
    return this.errors[name];
  },

  /**
   * Check if form has any errors
   * @returns {boolean} True if there are errors
   */
  hasErrors() {
    return Object.keys(this.errors).length > 0;
  },

  /**
   * Validate all fields in a form
   * @param {string} formId - The form element ID
   * @returns {boolean} True if form is valid
   */
  validateAll(formId) {
    const form = document.getElementById(formId);
    if (!form) return false;

    // Rebuild the error map from scratch on each full validation pass.
    this.errors = {};
    const fields = form.querySelectorAll('input, select, textarea');

    fields.forEach(field => {
      if (field.name && !field.validity.valid) {
        this.errors[field.name] = field.validationMessage;
      }
    });

    return !this.hasErrors();
  },

  /**
   * Validate that two password fields match
   * @param {string} password1Id - ID of first password field
   * @param {string} password2Id - ID of second password field
   * @returns {boolean} True if passwords match
   */
  validatePasswordMatch(password1Id, password2Id) {
    const pw1 = document.getElementById(password1Id);
    const pw2 = document.getElementById(password2Id);

    if (!pw1 || !pw2) return false;

    if (pw1.value !== pw2.value) {
      this.errors[pw2.name || password2Id] = 'Passwords do not match';
      // setCustomValidity also blocks native form submission.
      pw2.setCustomValidity('Passwords do not match');
      return false;
    }

    delete this.errors[pw2.name || password2Id];
    pw2.setCustomValidity('');
    return true;
  },

  /**
   * Submit a form with loading state (native, non-AJAX submission)
   * @param {string} formId - The form element ID
   */
  submit(formId) {
    const form = document.getElementById(formId);
    if (!form) return;

    // Validate before submit
    if (!form.checkValidity()) {
      form.reportValidity();
      return;
    }

    // Set action to current URL if empty
    if (!form.action || form.action === window.location.href + '#') {
      form.action = window.location.href;
    }

    // Set loading state and submit
    this.isSubmitting = true;
    form.submit();
  },

  /**
   * Submit form via AJAX (fetch)
   * @param {string} formId - The form element ID
   * @param {Object} options - Options { onSuccess, onError, closeOnSuccess }
   */
  async submitAsync(formId, options = {}) {
    const form = document.getElementById(formId);
    if (!form) return;

    // Validate before submit
    if (!form.checkValidity()) {
      form.reportValidity();
      return;
    }

    this.isSubmitting = true;

    try {
      const formData = new FormData(form);
      const response = await fetch(form.action || window.location.href, {
        method: form.method || 'POST',
        body: formData,
        headers: {
          'X-Requested-With': 'XMLHttpRequest',
        },
      });

      // Tolerate empty or non-JSON response bodies.
      const data = await response.json().catch(() => ({}));

      if (response.ok) {
        if (options.onSuccess) {
          options.onSuccess(data);
        }

        // Server may redirect via `redirect` or `success` URL fields.
        if (data.redirect || data.success) {
          window.location.href = data.redirect || data.success;
        } else if (options.closeOnSuccess) {
          this.$el.closest('dialog')?.close();
        }
      } else {
        const errorMsg = data.error || data.message || `Error: ${response.status}`;
        this.errors['_form'] = errorMsg;

        if (options.onError) {
          options.onError(errorMsg, data);
        }
      }
    } catch (error) {
      this.errors['_form'] = error.message;

      if (options.onError) {
        options.onError(error.message);
      }
    } finally {
      this.isSubmitting = false;
    }
  },

  /**
   * Set form action URL dynamically
   * @param {string} formId - The form element ID
   * @param {string} url - The URL to set as action
   */
  setFormAction(formId, url) {
    const form = document.getElementById(formId);
    if (form) {
      form.action = url;
    }
  },

  /**
   * Update image preview
   * @param {string} url - Image URL (with query params)
   */
  updateImagePreview(url) {
    // Store URL for reactive binding with :src
    this.imageUrl = url;
  },

  /**
   * Reset form state (call on modal close)
   * Resets to initial state from options
   */
  reset() {
    this.isSubmitting = false;
    this.errors = {};
    this.imageUrl = '';

    // Reset custom state to initial values.
    // NOTE: `options` here is the factory argument captured in the
    // closure, not submitAsync's parameter of the same name.
    if (options.state) {
      Object.keys(options.state).forEach(key => {
        this[key] = options.state[key];
      });
    }

    // Call custom reset handler if provided
    if (options.onReset) {
      options.onReset.call(this);
    }
  }
}));
|
||||
|
||||
/**
|
||||
* Simple Modal Component (no form)
|
||||
*
|
||||
* For modals that don't need form validation.
|
||||
*
|
||||
* Usage:
|
||||
* <dialog x-data="modal()">
|
||||
* <button @click="$el.closest('dialog').close()">Close</button>
|
||||
* </dialog>
|
||||
*/
|
||||
// Simple modal component with no state of its own; registered so that
// markup can use x-data="modal()" uniformly alongside modalForm().
Alpine.data('modal', () => ({
  // Placeholder for simple modals that may need state later
}));
|
||||
});
|
||||
148
static/js/alpine/pagination.js
Normal file
@ -0,0 +1,148 @@
|
||||
/**
|
||||
* Alpine.js Pagination Component
|
||||
*
|
||||
* Provides client-side pagination for large lists.
|
||||
*/
|
||||
|
||||
document.addEventListener('alpine:init', () => {
|
||||
/*
 * Server-driven paginated list: fetches one page at a time from
 * `options.endpoint` (expected JSON shape: items, total_items,
 * total_pages, page, page_size) and exposes navigation helpers plus a
 * windowed page-number list for the pager UI.
 */
Alpine.data('remotePaginatedList', (options = {}) => ({
  items: [],
  currentPage: 1,
  totalPages: 0,
  totalItems: 0,
  perPage: options.perPage || 50,
  endpoint: options.endpoint || '',
  isReviewed: options.isReviewed || false,
  // Unique key prefix so several lists can coexist on one page.
  instanceId: options.instanceId || Math.random().toString(36).substring(2, 9),
  isLoading: false,
  error: null,

  // Load the first page on mount when an endpoint is configured.
  init() {
    if (this.endpoint) {
      this.fetchPage(1);
    }
  },

  // Pagination is server-side, so the current items ARE the page.
  get paginatedItems() {
    return this.items;
  },

  // 1-based index of the first item currently shown (0 when empty).
  get showingStart() {
    if (this.totalItems === 0) return 0;
    return (this.currentPage - 1) * this.perPage + 1;
  },

  // 1-based index of the last item currently shown (0 when empty).
  get showingEnd() {
    if (this.totalItems === 0) return 0;
    return Math.min((this.currentPage - 1) * this.perPage + this.items.length, this.totalItems);
  },

  // Fetch one page; dispatches loading-start/loading-end around the
  // request and items-loaded on success (for parent tab counts etc.).
  async fetchPage(page) {
    if (!this.endpoint) {
      return;
    }

    this.isLoading = true;
    this.error = null;
    this.$dispatch('loading-start');

    try {
      const url = new URL(this.endpoint, window.location.origin);
      // Preserve existing query parameters and add pagination params
      url.searchParams.set('page', page.toString());
      url.searchParams.set('page_size', this.perPage.toString());

      const response = await fetch(url.toString(), {
        headers: { Accept: 'application/json' },
        credentials: 'same-origin'
      });

      if (!response.ok) {
        throw new Error(`Failed to load ${this.endpoint} (status ${response.status})`);
      }

      const data = await response.json();
      this.items = data.items || [];
      this.totalItems = data.total_items || 0;
      this.totalPages = data.total_pages || 0;
      this.currentPage = data.page || page;
      this.perPage = data.page_size || this.perPage;

      // Dispatch event for parent components (e.g., tab count updates)
      this.$dispatch('items-loaded', { totalItems: this.totalItems });
    } catch (err) {
      console.error(err);
      this.error = `Unable to load ${this.endpoint}. Please try again.`;
    } finally {
      this.isLoading = false;
      this.$dispatch('loading-end');
    }
  },

  // Navigate forward one page (bounded by totalPages).
  nextPage() {
    if (this.currentPage < this.totalPages) {
      this.fetchPage(this.currentPage + 1);
    }
  },

  // Navigate back one page (bounded at 1).
  prevPage() {
    if (this.currentPage > 1) {
      this.fetchPage(this.currentPage - 1);
    }
  },

  // Jump to an arbitrary page if it is in range.
  goToPage(page) {
    if (page >= 1 && page <= this.totalPages) {
      this.fetchPage(page);
    }
  },

  // Windowed page list for the pager: all pages when total <= 7,
  // otherwise first / ellipsis / (current±1 window) / ellipsis / last.
  // Each entry carries a stable per-instance key for x-for.
  get pageNumbers() {
    const pages = [];
    const total = this.totalPages;
    const current = this.currentPage;

    if (total === 0) {
      return pages;
    }

    if (total <= 7) {
      for (let i = 1; i <= total; i++) {
        pages.push({ page: i, isEllipsis: false, key: `${this.instanceId}-page-${i}` });
      }
    } else {
      pages.push({ page: 1, isEllipsis: false, key: `${this.instanceId}-page-1` });

      let rangeStart;
      let rangeEnd;

      // Keep a 3-wide window around the current page, widened to 4 at
      // either extreme so the pager always shows 7 slots.
      if (current <= 4) {
        rangeStart = 2;
        rangeEnd = 5;
      } else if (current >= total - 3) {
        rangeStart = total - 4;
        rangeEnd = total - 1;
      } else {
        rangeStart = current - 1;
        rangeEnd = current + 1;
      }

      if (rangeStart > 2) {
        pages.push({ page: '...', isEllipsis: true, key: `${this.instanceId}-ellipsis-start` });
      }

      for (let i = rangeStart; i <= rangeEnd; i++) {
        pages.push({ page: i, isEllipsis: false, key: `${this.instanceId}-page-${i}` });
      }

      if (rangeEnd < total - 1) {
        pages.push({ page: '...', isEllipsis: true, key: `${this.instanceId}-ellipsis-end` });
      }

      pages.push({ page: total, isEllipsis: false, key: `${this.instanceId}-page-${total}` });
    }

    return pages;
  }
}));
|
||||
});
|
||||
134
static/js/alpine/pathway.js
Normal file
@ -0,0 +1,134 @@
|
||||
/**
|
||||
* Pathway Viewer Alpine.js Component
|
||||
*
|
||||
* Provides reactive status management and polling for pathway predictions.
|
||||
* Handles status updates, change detection, and update notices.
|
||||
*/
|
||||
|
||||
document.addEventListener('alpine:init', () => {
|
||||
/**
|
||||
* Pathway Viewer Component
|
||||
*
|
||||
* Usage:
|
||||
* <div x-data="pathwayViewer({
|
||||
* status: 'running',
|
||||
* modified: '2024-01-01T00:00:00Z',
|
||||
* statusUrl: '/pathway/123?status=true'
|
||||
* })" x-init="init()">
|
||||
* ...
|
||||
* </div>
|
||||
*/
|
||||
// Pathway viewer: tracks prediction status, polls the status endpoint
// every 5s while "running", and raises reload/threshold notices when
// the server reports a change.
Alpine.data('pathwayViewer', (config) => ({
  status: config.status,
  modified: config.modified,
  modifiedDate: null,
  statusUrl: config.statusUrl,
  // Template passes a Python-rendered string ("True"/"False"), not a
  // boolean — compare against the string deliberately.
  emptyDueToThreshold: config.emptyDueToThreshold === "True",
  showUpdateNotice: false,
  showEmptyDueToThresholdNotice: false,
  emptyDueToThresholdMessage: 'The Pathway is empty due to the selected threshold. Please try a different threshold.',
  updateMessage: '',
  pollInterval: null,

  // Tooltip text for the current status badge ('' for unknown states).
  get statusTooltip() {
    const tooltips = {
      'completed': 'Pathway prediction completed.',
      'failed': 'Pathway prediction failed.',
      'running': 'Pathway prediction running.'
    };
    return tooltips[this.status] || '';
  },

  init() {
    this.modifiedDate = this.parseDate(this.modified);

    if (this.status === 'running') {
      this.startPolling();
    }

    if (this.emptyDueToThreshold) {
      this.showEmptyDueToThresholdNotice = true;
    }

  },

  // FIX: Alpine calls destroy() when the element is removed; clear the
  // timer there — previously the 5s fetch loop leaked past teardown.
  destroy() {
    if (this.pollInterval) {
      clearInterval(this.pollInterval);
      this.pollInterval = null;
    }
  },

  // Start the 5s status poll (idempotent while a timer is active).
  startPolling() {
    if (this.pollInterval) {
      return;
    }
    this.pollInterval = setInterval(() => this.checkStatus(), 5000);
  },

  // One poll cycle: fetch status JSON, detect status/modified changes,
  // surface notices, and stop polling once the run is no longer active.
  async checkStatus() {
    try {
      const response = await fetch(this.statusUrl);
      // FIX: reject non-2xx responses explicitly — previously an HTML
      // error page reached response.json() and failed with an opaque
      // parse error instead of the HTTP status.
      if (!response.ok) {
        throw new Error(`Status poll failed (${response.status})`);
      }
      const data = await response.json();

      if (data.emptyDueToThreshold) {
        this.emptyDueToThreshold = true;
        this.showEmptyDueToThresholdNotice = true;
      }

      const nextModifiedDate = this.parseDate(data.modified);
      const modifiedChanged = this.hasNewerTimestamp(nextModifiedDate, this.modifiedDate);
      const statusChanged = data.status !== this.status;

      if ((modifiedChanged || statusChanged) && !this.emptyDueToThreshold) {
        this.showUpdateNotice = true;
        this.updateMessage = this.getUpdateMessage(data.status, modifiedChanged, statusChanged);
      }

      this.modified = data.modified;
      this.modifiedDate = nextModifiedDate;
      this.status = data.status;

      // Stop polling once the prediction is no longer running.
      if (data.status !== 'running' && this.pollInterval) {
        clearInterval(this.pollInterval);
        this.pollInterval = null;
      }
    } catch (err) {
      console.error('Polling error:', err);
    }
  },

  // Human-readable notice for a detected change.
  getUpdateMessage(status, modifiedChanged, statusChanged) {
    // Prefer explicit status change messaging, otherwise fall back to modified change copy
    if (statusChanged) {
      if (status === 'completed') {
        return 'Prediction completed. Reload the page to see the updated Pathway.';
      }
      if (status === 'failed') {
        return 'Prediction failed. Reload the page to see the latest status.';
      }
    }

    let msg = 'Prediction ';

    if (status === 'running') {
      msg += 'is still running. But the Pathway was updated.';
    } else if (status === 'completed') {
      msg += 'is completed. Reload the page to see the updated Pathway.';
    } else if (status === 'failed') {
      msg += 'failed. Reload the page to see the current shape.';
    }

    return msg;
  },

  // Normalize "YYYY-MM-DD HH:mm:ss" into an ISO-compatible string to
  // avoid locale-dependent Date parsing; null for empty input.
  parseDate(dateString) {
    if (!dateString) return null;
    return new Date(dateString.replace(' ', 'T'));
  },

  // True when nextDate is strictly newer than currentDate (a missing
  // current timestamp counts as "newer" if next exists).
  hasNewerTimestamp(nextDate, currentDate) {
    if (!nextDate) return false;
    if (!currentDate) return true;
    return nextDate.getTime() > currentDate.getTime();
  },

  reloadPage() {
    location.reload();
  }
}));
|
||||
});
|
||||