Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions CHECKS
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# Dokku checks
# Require that the app is up (URL returns 200) before migrating traffic.
#
# Wait 10 seconds between checks
WAIT=10
# Fail a check if no response within 6 seconds
# NOTE(review): comment previously said "a minute" — confirm whether TIMEOUT should be 60
TIMEOUT=6
# Attempt checks 10 times
ATTEMPTS=10

# Check root URL
/
47 changes: 47 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
# See the upstream documentation for how this nginx+uwsgi Python image works
# It's based on Debian 11 Bullseye which is supported until August 31st, 2026
#
# https://github.com/tiangolo/uwsgi-nginx-docker
#
FROM tiangolo/uwsgi-nginx:python3.12

# Create the virtualenv and bring its packaging tools up to date.
# One logical step = one layer; splitting venv creation and the pip upgrade
# into two RUNs created an extra layer for no benefit.
RUN python3 -m venv /app/.ve \
    && . /app/.ve/bin/activate \
    && pip install --no-cache-dir --upgrade pip wheel setuptools

# Copy in setup & install requirements before the application code, so the
# (slow) dependency-install layer stays cached until requirements change.
COPY tools/ /app/tools/
COPY lib360dataquality/ /app/lib360dataquality/
COPY setup.py requirements_cove.txt LICENCE AUTHORS /app/

RUN . /app/.ve/bin/activate && pip install --no-cache-dir --upgrade -r /app/requirements_cove.txt

# Copy in the application code (after the deps layer, so code-only changes
# don't invalidate the dependency install above)
COPY cove/ /app/cove/

# The dqt expects to exist within a git repo
# in order to display the version
# NOTE(review): this bakes the full repository history into the image,
# growing it and exposing history to anyone who can pull the image.
# If that matters, consider writing the version to a file at build time
# instead — confirm with the team.
COPY .git/ /app/.git/

# The nginx+uwsgi image expects uwsgi config file at /app/uwsgi.ini
# prestart.sh script runs before app startup
COPY uwsgi.ini /app/uwsgi.ini
COPY prestart.sh /app/prestart.sh

# Nginx Maximum file upload size - what's the biggest 360 spreadsheet?
# https://github.com/tiangolo/uwsgi-nginx-docker?tab=readme-ov-file#custom-max-upload-size
ENV NGINX_MAX_UPLOAD=1G

# Place the database in a persistent volume.
# Note that Dokku must also be configured to mount
# the dir as persistent storage:
# https://dokku.com/docs/advanced-usage/persistent-storage/
# e.g.
# > dokku storage:ensure-directory --chown root dqt-persist-data
# > dokku storage:mount <dqt-app> /var/lib/dokku/data/storage/dqt-persist-data:/data
# (--chown root because this container runs the app as root)
VOLUME /data
ENV DB_NAME=/data/db.sqlite3
ENV MEDIA_ROOT=/data/media/

# Healthchecks are handled by Dokku, see CHECKS file.
1 change: 0 additions & 1 deletion Procfile

This file was deleted.

7 changes: 0 additions & 7 deletions app.json

This file was deleted.

3 changes: 2 additions & 1 deletion cove/cove_project/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
env = environ.Env( # set default values and casting
DB_NAME=(str, os.path.join(BASE_DIR, 'db.sqlite3')),
SENTRY_DSN=(str, ''),
MEDIA_ROOT=(str, os.path.join(BASE_DIR, 'media')),
)

# We use the setting to choose whether to show the section about Sentry in the
Expand All @@ -32,7 +33,7 @@
# We can't take MEDIA_ROOT and MEDIA_URL from cove settings,
# ... otherwise the files appear under the BASE_DIR that is the Cove library install.
# That could get messy. We want them to appear in our directory.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_ROOT = env('MEDIA_ROOT')
MEDIA_URL = '/media/'

SECRET_KEY = settings.SECRET_KEY
Expand Down
9 changes: 9 additions & 0 deletions prestart.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
#!/bin/sh
# Runs automatically before the app starts (the tiangolo/uwsgi-nginx base
# image executes /app/prestart.sh as a pre-start hook — see Dockerfile).
set -eu

# Run DB migrations & collect static before starting the app
cd /app/cove/
. /app/.ve/bin/activate
set -x
# --noinput: these run unattended at container start. Without it,
# collectstatic prompts for confirmation when overwriting existing files
# (and migrate can prompt too), which would hang the deploy.
python3 manage.py migrate --noinput
python3 manage.py collectstatic --noinput
25 changes: 25 additions & 0 deletions uwsgi.ini
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
[uwsgi]
# Django WSGI entry point, import path, and the virtualenv built in the Dockerfile
wsgi-file=/app/cove/cove_project/wsgi.py
pythonpath=/app/cove
virtualenv=/app/.ve

# ==== Tuning ====
# Enable Python threads
# https://github.com/OpenDataServices/cove/issues/486
enable-threads = true
# At least two workers
cheaper = 2
# Start off with two workers
cheaper-initial = 2
# Spawn up to 100 workers as needed
workers = 100
# One thread per process so they can be killed without affecting other requests
threads = 1
# Recycle each worker after this many requests (guards against slow leaks)
max-requests = 1024
# Log per-request worker memory usage
memory-report = true

# Per-worker address-space limit, in MB (so ~9 GB)
# NOTE(review): confirm intentional — far above the reload-on-as threshold below
limit-as = 9000
# Kill a worker whose request runs longer than this many seconds
# NOTE(review): 6000s is ~100 minutes — confirm this wasn't meant to be 600 or 60
harakiri = 6000

# If memory usage of a worker > 250MB at the *end* of a request, then reload it
reload-on-as = 250