diff --git a/README.md b/README.md
index 9da4d07..7f81704 100644
--- a/README.md
+++ b/README.md
@@ -4,25 +4,46 @@ a very basic django app to run "digital exhibition websites"
# Install for development
-We use docker to make development easier:
+We use docker to make development easier, though you can also run the app without it.
+Let's create the two env files, one for docker and one for pipenv:
- cp docker/.env.example docker/.env
+```bash
+ cp ./docker/.env.example ./docker/.env
+ cp .env.example .development.env
+```
-edit the `./docker/.env` file using a proper database name and change the password; then
-copy the `./example.env` file to `.env` and fill the fields using the same database name and password.
+Now edit the `./docker/.env` file, choosing a proper **database config** (name, user) and changing the password; then
+edit `.development.env` and fill in the fields using the same configuration.
This second step is needed because the **environment variable names** are different in
docker and in miller.
+```ini
SECRET_KEY=*****
DEBUG=True
MILLER_DATABASE_NAME=your db name
MILLER_DATABASE_USER=your db user
MILLER_DATABASE_PASSWORD=your db pass
MILLER_DATABASE_HOST=localhost
+```
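+
+If you need a fresh value for `SECRET_KEY`, you can let Django generate one
+(a one-liner using Django's own helper):
+
+```bash
+    python -c 'from django.core.management.utils import get_random_secret_key; print(get_random_secret_key())'
+```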
-then start the development docker with:
+Then start the development stack with:
- make run-dev
+```bash
+ ENV=development make run-pipenv
+```
+
+And in another terminal, start the django dev server (assuming the usual
+pipenv workflow; adjust the command to your setup):
+
+```bash
+    ENV=development pipenv run python manage.py runserver
+```
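+
+The `ENV=development` prefix makes miller read the `.development.env` file (see
+`miller/base.py`), so the app picks up the configuration you set above.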
+
+Under the hood `make run-pipenv` runs the following command:
+
+```bash
+cd docker && docker compose down --remove-orphans && \
+ docker compose --env-file=../.${ENV}.env -f docker-compose.pipenv.yml up
+```
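+
+The `--env-file=../.${ENV}.env` flag is what selects the env file, so the same
+pattern should work for other environments: e.g. `ENV=production make run-pipenv`
+would pick up a `.production.env` file.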
-This will install all images (redis, postgres...) and build locally celery and miller for you.
-`Watchdog` takes care of restarting miller and celery when a py file change in the codebase.
+This will pull all required images (redis, postgres, ...) and build celery and miller locally for you.
+`Watchdog` takes care of restarting miller and celery whenever a py file changes in the codebase.
@@ -65,7 +86,7 @@ We still recommend to run docker image for running Postgres and Redis:
-e POSTGRES_USER=miller \
-e PGDATA=/var/lib/postgresql/data/pgdata \
-v "$PWD/docker/postgres-data:/var/lib/postgresql/data" \
- -p 54320:5432 \
+ -p 5432:5432 \
postgres:14.1
-In this case, use the sae POSTGRES_PASSWORD and POSTGRES_USER in the env file and
+In this case, use the same POSTGRES_PASSWORD and POSTGRES_USER in the env file and
diff --git a/docker/.env.example b/docker/.env.example
index ab35814..b30cff1 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -1,6 +1,3 @@
-POSTGRES_USER=miller
-POSTGRES_DB=miller
-POSTGRES_PASSWORD=miller
MILLER_TAG=latest
DEBUG=False
ALLOWED_HOSTS=localhost,127.0.0.1
@@ -8,3 +5,6 @@ STATIC_URL=/miller-assets/
MILLER_SCHEMA_ROOT=/contents/schema
LANGUAGES="en|American English|en_US|english,fr|French|fr_FR|french,de|German|de_DE|german"
NGINX_PORT=80
+MILLER_DATABASE_USER=****
+MILLER_DATABASE_PASSWORD=****
+MILLER_DATABASE_NAME=****
\ No newline at end of file
diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml
index 68f1c48..d8b8f2d 100644
--- a/docker/docker-compose.dev.yml
+++ b/docker/docker-compose.dev.yml
@@ -7,15 +7,15 @@ services:
- ./data/redis:/data:z
entrypoint: redis-server --appendonly yes
ports:
- - "63790:6379"
+ - '63790:6379'
postgresdb:
image: postgres:14
ports:
- - "54320:5432"
+ - '54320:5432'
environment:
- POSTGRES_USER: ${POSTGRES_USER}
- POSTGRES_DB: ${POSTGRES_DB}
- POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
+ POSTGRES_USER: ${MILLER_DATABASE_USER}
+ POSTGRES_DB: ${MILLER_DATABASE_NAME}
+ POSTGRES_PASSWORD: ${MILLER_DATABASE_PASSWORD}
volumes:
- ./data/initdb.d:/docker-entrypoint-initdb.d:Z
- ./data/postgres:/var/lib/postgresql/data:Z
@@ -26,9 +26,9 @@ services:
environment:
DEBUG: ${DEBUG}
LANGUAGES: ${LANGUAGES}
- MILLER_DATABASE_NAME: ${POSTGRES_DB}
- MILLER_DATABASE_USER: ${POSTGRES_USER}
- MILLER_DATABASE_PASSWORD: ${POSTGRES_PASSWORD}
+ MILLER_DATABASE_NAME: ${MILLER_DATABASE_NAME}
+ MILLER_DATABASE_USER: ${MILLER_DATABASE_USER}
+ MILLER_DATABASE_PASSWORD: ${MILLER_DATABASE_PASSWORD}
MILLER_DATABASE_HOST: postgresdb
MILLER_DATABASE_PORT: 5432
MILLER_SCHEMA_ROOT: ${MILLER_SCHEMA_ROOT}
@@ -57,9 +57,9 @@ services:
ALLOWED_HOSTS: ${ALLOWED_HOSTS}
LANGUAGES: ${LANGUAGES}
STATIC_URL: ${STATIC_URL}
- MILLER_DATABASE_NAME: ${POSTGRES_DB}
- MILLER_DATABASE_USER: ${POSTGRES_USER}
- MILLER_DATABASE_PASSWORD: ${POSTGRES_PASSWORD}
+ MILLER_DATABASE_NAME: ${MILLER_DATABASE_NAME}
+ MILLER_DATABASE_USER: ${MILLER_DATABASE_USER}
+ MILLER_DATABASE_PASSWORD: ${MILLER_DATABASE_PASSWORD}
MILLER_DATABASE_HOST: postgresdb
MILLER_DATABASE_PORT: 5432
MILLER_SCHEMA_ROOT: ${MILLER_SCHEMA_ROOT}
@@ -75,12 +75,12 @@ services:
- redis
- postgresdb
entrypoint:
- - "/bin/sh"
+ - '/bin/sh'
- -c
- |
- PYTHONPATH=/ python miller/dbconnection.py \
- && python manage.py migrate \
- && watchmedo auto-restart --recursive --patterns="*.py;*.json;*.js;*.html" --directory="/miller" -- /bin/sh -i miller/docker-compose-dev.entrypoint.sh
+ PYTHONPATH=/ python miller/dbconnection.py \
+ && python manage.py migrate \
+ && watchmedo auto-restart --recursive --patterns="*.py;*.json;*.js;*.html" --directory="/miller" -- /bin/sh -i miller/docker-compose-dev.entrypoint.sh
# ports:
# - 8008:8000
volumes:
@@ -93,7 +93,7 @@ services:
restart: always
image: nginx:1.17-alpine
ports:
- - "${NGINX_PORT}:80"
+ - '${NGINX_PORT}:80'
depends_on:
- miller
- celery
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index 2ee6748..4b66c2d 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -9,9 +9,9 @@ services:
postgresdb:
image: postgres:14
environment:
- POSTGRES_USER: ${POSTGRES_USER}
- POSTGRES_DB: ${POSTGRES_DB}
- POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
+ POSTGRES_USER: ${MILLER_DATABASE_USER}
+ POSTGRES_DB: ${MILLER_DATABASE_NAME}
+ POSTGRES_PASSWORD: ${MILLER_DATABASE_PASSWORD}
volumes:
- ./data/initdb.d:/docker-entrypoint-initdb.d:Z
- ./data/postgres:/var/lib/postgresql/data:Z
@@ -19,17 +19,17 @@ services:
image: c2dhunilu/miller-v2:${MILLER_TAG}
restart: always
environment:
- DEBUG: ${DEBUG}
- LANGUAGES: ${LANGUAGES}
- MILLER_DATABASE_NAME: ${POSTGRES_DB}
- MILLER_DATABASE_USER: ${POSTGRES_USER}
- MILLER_DATABASE_PASSWORD: ${POSTGRES_PASSWORD}
- MILLER_DATABASE_HOST: postgresdb
- MILLER_DATABASE_PORT: 5432
- MILLER_SCHEMA_ROOT: ${MILLER_SCHEMA_ROOT}
- REDIS_HOST: redis
- REDIS_PORT: 6379
- STATIC_URL: ${STATIC_URL}
+ DEBUG: ${DEBUG}
+ LANGUAGES: ${LANGUAGES}
+ MILLER_DATABASE_NAME: ${MILLER_DATABASE_NAME}
+      MILLER_DATABASE_USER: ${MILLER_DATABASE_USER}
+ MILLER_DATABASE_PASSWORD: ${MILLER_DATABASE_PASSWORD}
+ MILLER_DATABASE_HOST: postgresdb
+ MILLER_DATABASE_PORT: 5432
+ MILLER_SCHEMA_ROOT: ${MILLER_SCHEMA_ROOT}
+ REDIS_HOST: redis
+ REDIS_PORT: 6379
+ STATIC_URL: ${STATIC_URL}
depends_on:
- miller
entrypoint: celery -A miller worker -l info
@@ -46,9 +46,9 @@ services:
DEBUG: ${DEBUG}
ALLOWED_HOSTS: ${ALLOWED_HOSTS}
LANGUAGES: ${LANGUAGES}
- MILLER_DATABASE_NAME: ${POSTGRES_DB}
+ MILLER_DATABASE_NAME: ${MILLER_DATABASE_NAME}
-      MILLER_DATABASE_USER: ${POSTGRES_USER}
+      MILLER_DATABASE_USER: ${MILLER_DATABASE_USER}
- MILLER_DATABASE_PASSWORD: ${POSTGRES_PASSWORD}
+ MILLER_DATABASE_PASSWORD: ${MILLER_DATABASE_PASSWORD}
MILLER_DATABASE_HOST: postgresdb
MILLER_DATABASE_PORT: 5432
MILLER_SCHEMA_ROOT: ${MILLER_SCHEMA_ROOT}
@@ -82,7 +82,7 @@ services:
image: nginx:1.17-alpine
restart: always
ports:
- - "${NGINX_PORT}:80"
+ - '${NGINX_PORT}:80'
depends_on:
- miller
- celery
diff --git a/miller/admin.py b/miller/admin.py
index b709109..84c16e7 100644
--- a/miller/admin.py
+++ b/miller/admin.py
@@ -19,50 +19,53 @@
logger = logging.getLogger(__name__)
# document data validation
-document_json_schema = JSONSchema(filepath='document/payload.json')
-document_json_schemas = get_available_schemas(folder='document')
+document_json_schema = JSONSchema(filepath="document/payload.json")
+document_json_schemas = get_available_schemas(folder="document")
+
class DataTypeListFilter(DataPropertyListFilter):
- parameter_name = 'data__type'
- params = ['type', 'data']
+ parameter_name = "data__type"
+ params = ["type", "data"]
class DataProviderListFilter(DataPropertyListFilter):
- parameter_name = 'data__provider'
- params = ['provider', 'data']
+ parameter_name = "data__provider"
+ params = ["provider", "data"]
class StoryAdmin(admin.ModelAdmin):
- list_display = ['title', 'slug', 'status', 'owner', 'date_created', 'date_last_modified']
- list_filter = ('status', 'tags')
- search_fields = ('pk', 'slug', 'short_url', 'title')
- autocomplete_fields = ['covers']
- ordering = ['title']
- actions = ['make_published', 'make_draft', 'populate_search_vectors']
+ list_display = [
+ "title",
+ "slug",
+ "status",
+ "owner",
+ "date_created",
+ "date_last_modified",
+ ]
+ list_filter = ("status", "tags")
+ search_fields = ("pk", "slug", "short_url", "title")
+ autocomplete_fields = ["covers", "tags"]
+ ordering = ["title"]
+ actions = ["make_published", "make_draft", "populate_search_vectors"]
def make_published(self, request, queryset):
rows_updated = queryset.update(status=Story.PUBLIC)
if rows_updated == 1:
message_bit = "1 story was"
else:
- message_bit = F'{rows_updated} stories were'
- self.message_user(
- request,
- F'{message_bit} successfully marked as published.'
- )
+ message_bit = f"{rows_updated} stories were"
+ self.message_user(request, f"{message_bit} successfully marked as published.")
def make_draft(self, request, queryset):
rows_updated = queryset.update(status=Story.DRAFT)
if rows_updated == 1:
message_bit = "1 story was"
else:
- message_bit = F'{rows_updated} stories were'
+ message_bit = f"{rows_updated} stories were"
self.message_user(
- request,
- F'{message_bit} successfully unpublished, marked as DRAFT.'
+ request, f"{message_bit} successfully unpublished, marked as DRAFT."
)
-
def populate_search_vectors(self, request, queryset):
for item in queryset:
update_story_search_vectors(story_pk=item.pk)
@@ -75,75 +78,112 @@ def populate_search_vectors(self, request, queryset):
class DataAdminForm(forms.ModelForm):
def clean_data(self):
- logger.info('clean_data on data')
- data = self.cleaned_data.get('data')
- datatype = data.get('type')
- filename = 'document/payload.json'
+ logger.info("clean_data on data")
+ data = self.cleaned_data.get("data")
+ datatype = data.get("type")
+ filename = "document/payload.json"
try:
# validate schema using specific payload for the data.type, if it exists
- payload_schema = document_json_schemas.get(f'payload.{datatype}.json', None)
+ payload_schema = document_json_schemas.get(f"payload.{datatype}.json", None)
if payload_schema is not None:
- filename = f'document/payload.{datatype}.json'
+ filename = f"document/payload.{datatype}.json"
payload_schema.validate(data)
else:
document_json_schema.validate(data)
except ValidationError as err:
logger.error(
- 'ValidationError on current data (model:{},pk:{}): {}'.format(
+ "ValidationError on current data (model:{},pk:{}): {}".format(
self.instance.__class__.__name__,
self.instance.pk,
err.message,
)
)
raise forms.ValidationError(
- f'Schema loaded from: {filename}. error: {err.message}')
+ f"Schema loaded from: {filename}. error: {err.message}"
+ )
return data
class DocumentAdmin(admin.ModelAdmin):
list_display = (
- 'id', 'slug', 'title', 'type', 'date_last_modified',
- 'attachment', 'thumbnail')
- list_filter = ('type', DataTypeListFilter, DataProviderListFilter)
- search_fields = ('pk', 'slug', 'short_url', 'title')
+ "id",
+ "slug",
+ "title",
+ "type",
+ "date_last_modified",
+ "attachment",
+ "thumbnail",
+ )
+ list_filter = ("type", "tags", DataTypeListFilter, DataProviderListFilter)
+ search_fields = ("pk", "slug", "short_url", "title")
+ autocomplete_fields = ["tags", "documents"]
+
fieldsets = [
- (None, {'fields': ['type', 'short_url', 'title', 'slug']}),
- ('Metadata', {'fields': ['data']}),
- ('Content', {
- 'fields': [
- 'copyrights', 'url', 'owner', 'attachment', 'snapshot',
- 'mimetype', 'locked', 'search_vector',
- 'documents'
- ]
- })
+ (None, {"fields": ["type", "short_url", "title", "slug"]}),
+ ("Metadata", {"fields": ["data"]}),
+ (
+ "Content",
+ {
+ "fields": [
+ "copyrights",
+ "url",
+ "owner",
+ "attachment",
+ "snapshot",
+ "mimetype",
+ "locked",
+ "search_vector",
+ "documents",
+ "tags",
+ ]
+ },
+ ),
+ ]
+ actions = [
+ "populate_search_vectors",
+ "create_document_snapshot",
+ "update_data_by_type",
]
- actions = ['populate_search_vectors', 'create_document_snapshot', 'update_data_by_type']
form = DataAdminForm
- change_form_template = 'miller/document/document_change_form.html'
+ change_form_template = "miller/document/document_change_form.html"
class Media:
- css = {'all': ('css/edit_json_field.css',)}
+ css = {"all": ("css/edit_json_field.css",)}
def thumbnail(self, instance):
resolutions = instance.data.get(settings.MILLER_SIZES_SNAPSHOT_DATA_KEY, None)
if not resolutions:
- has_valid_attachment = instance.attachment and getattr(instance.attachment, 'path', None) and os.path.exists(instance.attachment.path)
+ has_valid_attachment = (
+ instance.attachment
+ and getattr(instance.attachment, "path", None)
+ and os.path.exists(instance.attachment.path)
+ )
if has_valid_attachment:
if instance.type in [Document.IMAGE, Document.PDF]:
- return mark_safe('... ready to be queued to get preview')
+ return mark_safe("... ready to be queued to get preview")
else:
- return f'attachment available, preview not available for type: {instance.type}'
+ return f"attachment available, preview not available for type: {instance.type}"
if instance.url:
-            return mark_safe(f'remote url: <a href="{instance.url}">{instance.url}</a>, no preview available')
- if instance.type in [Document.IMAGE, Document.AUDIO, Document.VIDEO, Document.AV, Document.PDF]:
- return mark_safe('⚠️ attachment not found')
- return ''
- thumbnail = resolutions.get('thumbnail', {})
-        return mark_safe('<img src="{url}" alt="thumbnail" />'.format(**thumbnail))
+            return mark_safe(
+                f'remote url: <a href="{instance.url}">{instance.url}</a>, '
+                'no preview available'
+            )
+ if instance.type in [
+ Document.IMAGE,
+ Document.AUDIO,
+ Document.VIDEO,
+ Document.AV,
+ Document.PDF,
+ ]:
+ return mark_safe("⚠️ attachment not found")
+ return ""
+ thumbnail = resolutions.get("thumbnail", {})
+        return mark_safe(
+            '<img src="{url}" alt="thumbnail" />'.format(**thumbnail)
+        )
- thumbnail.__name__ = 'Thumbnail'
+ thumbnail.__name__ = "Thumbnail"
def populate_search_vectors(self, request, queryset):
for item in queryset:
@@ -156,14 +196,11 @@ def create_document_snapshot(self, request, queryset):
if rows_updated == 1:
message_bit = "1 document"
else:
- message_bit = f'{rows_updated} documents'
- self.message_user(
- request,
- F'{message_bit} added to the queue'
- )
-
+ message_bit = f"{rows_updated} documents"
+ self.message_user(request, f"{message_bit} added to the queue")
+
create_document_snapshot.short_description = "Create thumbnails"
-
+
def update_data_by_type(self, request, queryset):
for item in queryset:
update_document_data_by_type.delay(document_pk=item.pk)
@@ -171,12 +208,8 @@ def update_data_by_type(self, request, queryset):
if rows_updated == 1:
message_bit = "1 document"
else:
- message_bit = f'{rows_updated} documents'
- self.message_user(
- request,
- F'{message_bit} added to the queue'
- )
-
+ message_bit = f"{rows_updated} documents"
+ self.message_user(request, f"{message_bit} added to the queue")
-# Define an inline admin descriptor for Employee model
+# Define an inline admin descriptor for the Profile model
@@ -184,7 +217,7 @@ def update_data_by_type(self, request, queryset):
class ProfileInline(admin.StackedInline):
model = Profile
can_delete = False
- verbose_name_plural = 'employee'
+ verbose_name_plural = "employee"
# Define a new User admin
@@ -192,14 +225,21 @@ class UserAdmin(BaseUserAdmin):
inlines = (ProfileInline,)
+class TagAdmin(admin.ModelAdmin):
+ list_display = ["name", "slug", "category"]
+ list_filter = ["category"]
+ search_fields = ["name", "slug"]
+ ordering = ["name"]
+
+
# Re-register UserAdmin
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(Story, StoryAdmin)
-admin.site.register(Tag)
+admin.site.register(Tag, TagAdmin)
admin.site.register(Document, DocumentAdmin)
admin.site.register(Caption)
admin.site.register(Mention)
admin.site.register(Author)
admin.site.register(Profile)
-logger.info('admin registered.')
+logger.info("admin registered.")
diff --git a/miller/api/serializers/document.py b/miller/api/serializers/document.py
index d020e0f..c8ecb1c 100644
--- a/miller/api/serializers/document.py
+++ b/miller/api/serializers/document.py
@@ -1,64 +1,89 @@
import logging
from rest_framework import serializers
from ...models.document import Document
+from .tag import LiteTagSerializer
from ...utils.schema import JSONSchema, get_available_schemas
+
# from jsonschema.exceptions import ValidationError
from .fields import RelativeFileField
logger = logging.getLogger(__name__)
-document_json_schema = JSONSchema(filepath='document/payload.json')
-document_json_schemas = get_available_schemas(folder='document')
+document_json_schema = JSONSchema(filepath="document/payload.json")
+document_json_schemas = get_available_schemas(folder="document")
+
class LiteDocumentSerializer(serializers.ModelSerializer):
"""
# light document serializer (to be used in manytomany retrieve)
"""
+
snapshot = RelativeFileField(
- required=False, max_length=None,
- allow_empty_file=True, use_url=True
+ required=False, max_length=None, allow_empty_file=True, use_url=True
)
attachment = RelativeFileField(
- required=False, max_length=None,
- allow_empty_file=True, use_url=True
+ required=False, max_length=None, allow_empty_file=True, use_url=True
)
+    tags = LiteTagSerializer(many=True, read_only=True)
class Meta:
model = Document
fields = (
- 'id', 'title', 'slug', 'mimetype', 'type', 'data', 'url',
- 'attachment', 'snapshot', 'short_url'
+ "id",
+ "title",
+ "slug",
+ "mimetype",
+ "type",
+ "data",
+ "url",
+ "attachment",
+ "snapshot",
+ "short_url",
+ "tags",
+ "documents",
)
class DocumentSerializer(LiteDocumentSerializer):
documents = LiteDocumentSerializer(many=True)
+    tags = LiteTagSerializer(many=True, read_only=True)
snapshot = RelativeFileField(
- required=False, max_length=None,
- allow_empty_file=True, use_url=True
+ required=False, max_length=None, allow_empty_file=True, use_url=True
)
attachment = RelativeFileField(
- required=False, max_length=None,
- allow_empty_file=True, use_url=True
+ required=False, max_length=None, allow_empty_file=True, use_url=True
)
class Meta:
model = Document
fields = (
- 'id', 'url', 'data', 'type', 'slug', 'title', 'snapshot',
- 'copyrights', 'attachment', 'documents', 'locked', 'short_url'
+ "id",
+ "url",
+ "data",
+ "type",
+ "slug",
+ "title",
+ "snapshot",
+ "copyrights",
+ "attachment",
+ "documents",
+ "locked",
+ "short_url",
+ "tags",
)
class CreateDocumentSerializer(LiteDocumentSerializer):
- owner = serializers.HiddenField(
- default=serializers.CurrentUserDefault()
- )
+ owner = serializers.HiddenField(default=serializers.CurrentUserDefault())
# To remove the file
- attachment = serializers.FileField(max_length=None, allow_empty_file=True, allow_null=True, required=False)
+ attachment = serializers.FileField(
+ max_length=None, allow_empty_file=True, allow_null=True, required=False
+ )
# To remove the file
- snapshot = serializers.FileField(max_length=None, allow_empty_file=True, allow_null=True, required=False)
+ snapshot = serializers.FileField(
+ max_length=None, allow_empty_file=True, allow_null=True, required=False
+ )
# Required to have a json object instead of string in the validate_data function
data = serializers.JSONField()
@@ -66,15 +91,25 @@ class CreateDocumentSerializer(LiteDocumentSerializer):
class Meta:
model = Document
fields = (
- 'id', 'owner', 'type', 'data', 'short_url', 'title', 'slug',
- 'copyrights', 'url', 'attachment', 'snapshot', 'mimetype'
+ "id",
+ "owner",
+ "type",
+ "data",
+ "short_url",
+ "title",
+ "slug",
+ "copyrights",
+ "url",
+ "attachment",
+ "snapshot",
+ "mimetype",
)
def validate_data(self, data):
- logger.info('validate_data on data')
+ logger.info("validate_data on data")
## get type from data field
- datatype = str(data.get('type', ''))
- data_schema = document_json_schemas.get(f'payload.{datatype}.json', None)
+ datatype = str(data.get("type", ""))
+ data_schema = document_json_schemas.get(f"payload.{datatype}.json", None)
if data_schema is not None:
errors = data_schema.lazy_validate(data)
else:
@@ -83,13 +118,14 @@ def validate_data(self, data):
error_messages = []
if errors:
for err in errors:
- error_messages.append('Invalid value for %s: %s' % (err.schema['title'], err.message))
+ error_messages.append(
+ "Invalid value for %s: %s" % (err.schema["title"], err.message)
+ )
- if(error_messages):
+ if error_messages:
logger.error(
- 'ValidationError on current data (model:Document,pk:{}): {}'.format(
- self.instance.pk if self.instance else 'New',
- error_messages
+ "ValidationError on current data (model:Document,pk:{}): {}".format(
+ self.instance.pk if self.instance else "New", error_messages
)
)
raise serializers.ValidationError(error_messages)
diff --git a/miller/api/serializers/tag.py b/miller/api/serializers/tag.py
index 6cd0aee..36e6bfa 100644
--- a/miller/api/serializers/tag.py
+++ b/miller/api/serializers/tag.py
@@ -2,17 +2,28 @@
from ...models.tag import Tag
+class LiteTagSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = Tag
+ fields = ("id", "category", "slug", "name", "status", "data")
+
+
# tag represnetation in many to many
class TagSerializer(serializers.ModelSerializer):
- stories = serializers.IntegerField(read_only=True, source='num_stories')
- created = serializers.BooleanField(read_only=True, source='is_created')
+ stories = serializers.IntegerField(read_only=True, source="num_stories")
+ created = serializers.BooleanField(read_only=True, source="is_created")
class Meta:
model = Tag
fields = (
- 'id', 'category', 'slug', 'name', 'status', 'data',
- 'stories',
- 'created'
+ "id",
+ "category",
+ "slug",
+ "name",
+ "status",
+ "data",
+ "stories",
+ "created",
)
# def run_validators(self, value):
@@ -23,9 +34,10 @@ class Meta:
def create(self, validated_data):
instance, created = Tag.objects.get_or_create(
- name=validated_data['name'].lower(),
- category=validated_data['category'],
- defaults={'data': validated_data['data']})
+ name=validated_data["name"].lower(),
+ category=validated_data["category"],
+ defaults={"data": validated_data["data"]},
+ )
instance.is_created = created
return instance
diff --git a/miller/base.py b/miller/base.py
index 00d948b..79712a3 100644
--- a/miller/base.py
+++ b/miller/base.py
@@ -1,55 +1,60 @@
-import os
-import logging
-from dotenv import dotenv_values
+import os
from django.core.exceptions import ImproperlyConfigured
from pathlib import Path # python3 only
+from dotenv import dotenv_values
+from typing import Any, Optional
+
+# e.g. set ENV=production to load the .production.env file
+dotenv_filename = (
+    ".{0}.env".format(os.environ.get("ENV", "")) if "ENV" in os.environ else ".env"
+)
+dotenv_path = str(Path(".") / dotenv_filename)
+print(f"Loading env file: \033[94m{dotenv_path}\033[0m")
+
+# fail early if the selected env file does not exist
+if not os.path.exists(dotenv_path):
+    raise ImproperlyConfigured("No .env file found at {0}".format(dotenv_path))
+
+dotenv_dict = dotenv_values(dotenv_path=dotenv_path, verbose=True)
+
+# for k, v in dotenv_dict.items():
+# print("{0}={1}".format(k, v))
+
+
+def get_env_variable(var_name: str, default: Optional[Any] = None) -> Any:
+ """
+ Retrieve the value of an environment variable based on the selected environment file.
+
+ The function first checks if the variable is defined in the dotenv file corresponding to the
+ current environment mode, as determined by the `ENV` setting. If `ENV` is set to a specific value
+ (e.g., `test`), the function loads variables from `.test.env`. If the variable is not found in
+ the dotenv file, it then checks the system's environment variables. If still not found, it returns
+ the `default` value if provided, or raises an error if required.
-logger = logging.getLogger(__name__)
-
-# Previous version of dotenv_values method now deprecated
-#
-# def dotenv_values(dotenv_path):
-# lines = []
-# try:
-# with open(dotenv_path) as fp:
-# lines = fp.read().splitlines()
-# except FileNotFoundError as e:
-# if sys.argv[1] == 'test':
-# logger.warning(
-# f'No dotenv file found using dotenv_path:{dotenv_path}'
-# )
-# return {}
-# else:
-# raise e
-#
-# # get tuples of values,property splitting each line of the file
-# lines = map(lambda l: tuple(re.split(r'\s*=\s*', l, 1)), filter(
-# None, lines
-# ))
-# lines = list(lines)
-# print(f"dotenv_values: found {len(lines)} valid lines")
-# if not lines:
-# return dict()
-# return dict(lines)
-
-
-def get_env_variable(var_name, default=None):
+ Environment Modes:
+ Set `ENV` to specify which dotenv file to load:
+ - `ENV=production` loads `.production.env`.
+ - `ENV=test` loads `.test.env`.
+ - If `ENV` is not set, the default `.env` file may be used.
+
+ Args:
+ var_name (str): Name of the environment variable to retrieve.
+ default (Optional[Any]): Value to return if the variable is not found. Defaults to None.
+
+ Returns:
+ Any: The value of the environment variable or the `default` value if not found.
+
+ Raises:
+ ImproperlyConfigured: If the environment variable is not found and no `default` is provided.
+
+ Example:
+ >>> get_env_variable('DATABASE_URL', default='sqlite:///:memory:')
+ """
if var_name in dotenv_dict:
return dotenv_dict[var_name]
try:
return os.environ[var_name]
except KeyError:
- if default == "" or default == 0 or default:
+ if default is not None:
return default
error_msg = "Set the %s environment variable" % var_name
raise ImproperlyConfigured(error_msg)
-
-
-# e.g. set ENV=production to get .production.env file
-dotenv_filename = (
- ".{0}.env".format(os.environ.get("ENV", "")) if "ENV" in os.environ else ".env"
-)
-dotenv_path = str(Path(".") / dotenv_filename)
-dotenv_dict = dotenv_values(dotenv_path=dotenv_path)
-
-logger.debug(f"loading env file: {dotenv_filename}")
diff --git a/miller/migrations/0013_document_tags_alter_document_type.py b/miller/migrations/0013_document_tags_alter_document_type.py
new file mode 100644
index 0000000..4c315bf
--- /dev/null
+++ b/miller/migrations/0013_document_tags_alter_document_type.py
@@ -0,0 +1,23 @@
+# Generated by Django 4.1.3 on 2024-11-14 13:46
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('miller', '0012_alter_document_documents_alter_document_type_and_more'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='document',
+ name='tags',
+ field=models.ManyToManyField(blank=True, to='miller.tag'),
+ ),
+ migrations.AlterField(
+ model_name='document',
+ name='type',
+ field=models.CharField(choices=[('to be defined', 'to be defined'), ('bibtex', 'bibtex'), ('crossref', 'crossref bibtex'), ('video-cover', 'video interview'), ('video', 'video'), ('audio', 'audio'), ('text', 'text'), ('picture', 'picture'), ('pdf', 'pdf'), ('image', 'image'), ('photo', 'photo'), ('rich', 'rich'), ('link', 'link'), ('audiovisual', 'audiovisual'), ('glossary', 'glossary entry'), ('entity', 'entity: see data type property')], default='to be defined', max_length=24),
+ ),
+ ]
diff --git a/miller/models/document.py b/miller/models/document.py
index fd87867..0605471 100644
--- a/miller/models/document.py
+++ b/miller/models/document.py
@@ -10,7 +10,7 @@
from ..snapshots import create_snapshot, create_different_sizes_from_snapshot
from ..utils.models import get_search_vector_query, create_short_url, get_unique_slug
from ..utils.media import get_video_subtitles
-
+from . import Tag
logger = logging.getLogger(__name__)
@@ -24,79 +24,70 @@ def private_attachment_file_name(instance, filename):
def snapshot_attachment_file_name(instance, filename):
- return os.path.join(instance.type, 'snapshots', filename)
+ return os.path.join(instance.type, "snapshots", filename)
class Document(models.Model):
- TBD = 'to be defined'
- BIBLIOGRAPHIC_REFERENCE = 'bibtex'
- CROSSREF_REFERENCE = 'crossref'
- VIDEO_COVER = 'video-cover'
- PICTURE = 'picture'
- IMAGE = 'image'
- PHOTO = 'photo'
- VIDEO = 'video'
- AUDIO = 'audio'
- TEXT = 'text'
- PDF = 'pdf'
- RICH = 'rich'
- LINK = 'link'
- AV = 'audiovisual'
- GLOSSARY = 'glossary'
- ENTITY = 'entity'
+ TBD = "to be defined"
+ BIBLIOGRAPHIC_REFERENCE = "bibtex"
+ CROSSREF_REFERENCE = "crossref"
+ VIDEO_COVER = "video-cover"
+ PICTURE = "picture"
+ IMAGE = "image"
+ PHOTO = "photo"
+ VIDEO = "video"
+ AUDIO = "audio"
+ TEXT = "text"
+ PDF = "pdf"
+ RICH = "rich"
+ LINK = "link"
+ AV = "audiovisual"
+ GLOSSARY = "glossary"
+ ENTITY = "entity"
TYPE_CHOICES = (
- (TBD, 'to be defined'),
- (BIBLIOGRAPHIC_REFERENCE, 'bibtex'),
- (CROSSREF_REFERENCE, 'crossref bibtex'),
- (VIDEO_COVER, 'video interview'),
- (VIDEO, 'video'),
- (AUDIO, 'audio'),
- (TEXT, 'text'),
- (PICTURE, 'picture'),
- (PDF, 'pdf'),
- (IMAGE, 'image'),
- (PHOTO, 'photo'),
- (RICH, 'rich'),
- (LINK, 'link'),
- (AV, 'audiovisual'),
- (GLOSSARY, 'glossary entry'),
+ (TBD, "to be defined"),
+ (BIBLIOGRAPHIC_REFERENCE, "bibtex"),
+ (CROSSREF_REFERENCE, "crossref bibtex"),
+ (VIDEO_COVER, "video interview"),
+ (VIDEO, "video"),
+ (AUDIO, "audio"),
+ (TEXT, "text"),
+ (PICTURE, "picture"),
+ (PDF, "pdf"),
+ (IMAGE, "image"),
+ (PHOTO, "photo"),
+ (RICH, "rich"),
+ (LINK, "link"),
+ (AV, "audiovisual"),
+ (GLOSSARY, "glossary entry"),
# for ENTITY, use the type field inside data JsonField.
- (ENTITY, 'entity: see data type property'),
+ (ENTITY, "entity: see data type property"),
) + settings.MILLER_DOCUMENT_TYPE_CHOICES
type = models.CharField(max_length=24, choices=TYPE_CHOICES, default=TBD)
short_url = models.CharField(
- max_length=22, db_index=True, unique=True, blank=True,
- default=create_short_url
+ max_length=22, db_index=True, unique=True, blank=True, default=create_short_url
)
- title = models.CharField(max_length=500, default='')
- slug = models.CharField(
- max_length=150, unique=True, blank=True, db_index=True
- )
+ title = models.CharField(max_length=500, default="")
+ slug = models.CharField(max_length=150, unique=True, blank=True, db_index=True)
data = UTF8JSONField(
- verbose_name=u'data contents', help_text='JSON format',
- default=dict, blank=True
+ verbose_name="data contents", help_text="JSON format", default=dict, blank=True
)
- copyrights = models.TextField(null=True, blank=True, default='')
+ copyrights = models.TextField(null=True, blank=True, default="")
url = models.URLField(max_length=500, null=True, blank=True)
- owner = models.ForeignKey(
- User, on_delete=models.CASCADE,
- null=True, blank=True
- )
+ owner = models.ForeignKey(User, on_delete=models.CASCADE, null=True, blank=True)
attachment = models.FileField(
- upload_to=attachment_file_name,
- null=True, blank=True, max_length=200
+ upload_to=attachment_file_name, null=True, blank=True, max_length=200
)
snapshot = models.FileField(
- upload_to=snapshot_attachment_file_name,
- null=True, blank=True, max_length=200
+ upload_to=snapshot_attachment_file_name, null=True, blank=True, max_length=200
)
- mimetype = models.CharField(max_length=127, blank=True, default='')
+ mimetype = models.CharField(max_length=127, blank=True, default="")
# @TODO prevent accidental override when it is not needed.
locked = models.BooleanField(default=False)
@@ -111,11 +102,13 @@ class Document(models.Model):
# undirected links
documents = models.ManyToManyField("self", blank=True)
+ tags = models.ManyToManyField(Tag, blank=True)
+
# enable full text search using postgres vectors
allow_fulltext_search = True
class Meta:
- indexes = [GinIndex(fields=['search_vector'])]
+ indexes = [GinIndex(fields=["search_vector"])]
def __str__(self):
return f'{self.slug} [{self.type}/{self.data.get("type", " - ")}]'
@@ -132,66 +125,62 @@ def create_snapshot_from_attachment(self, override=True):
If snapshot is already present, look for override param
"""
logger.info(
- f'create_snapshot_from_attachment document pk:{self.pk}'
- f' using type:{self.type} ...'
+ f"create_snapshot_from_attachment document pk:{self.pk}"
+ f" using type:{self.type} ..."
)
- if not self.attachment or not getattr(self.attachment, 'path', None):
+ if not self.attachment or not getattr(self.attachment, "path", None):
logger.error(
- f'create_snapshot_from_attachment document pk:{self.pk}'
- f' failed, no attachment found! Skip.')
+ f"create_snapshot_from_attachment document pk:{self.pk}"
+ f" failed, no attachment found! Skip."
+ )
return
if not os.path.exists(self.attachment.path):
logger.error(
- f'create_snapshot_from_attachment document pk:{self.pk} '
- f'failed, attached file {self.attachment.path} does not exist.'
+ f"create_snapshot_from_attachment document pk:{self.pk} "
+ f"failed, attached file {self.attachment.path} does not exist."
)
return
# get snaphot path and its width / height
snapshot, w, h = create_snapshot(
- basepath=self.type,
- source=self.attachment.path
+ basepath=self.type, source=self.attachment.path
)
# save document, snapshot should be related to MEDIA_ROOT
- self.snapshot = os.path.join(*snapshot.replace(
- settings.MEDIA_ROOT, ''
- ).split('/'))
- self.data.update({
- 'snapshot': {
- 'width': w,
- 'height': h
- }
- })
+ self.snapshot = os.path.join(
+ *snapshot.replace(settings.MEDIA_ROOT, "").split("/")
+ )
+ self.data.update({"snapshot": {"width": w, "height": h}})
logger.info(
- f'create_snapshot_from_attachment document pk:{self.pk}'
- f' using file {self.attachment.path}'
- f' success: created {self.snapshot.path}'
+ f"create_snapshot_from_attachment document pk:{self.pk}"
+ f" using file {self.attachment.path}"
+ f" success: created {self.snapshot.path}"
)
self.save()
- def create_different_sizes_from_snapshot(self, data_key='resolutions'):
- if not self.snapshot or not getattr(self.snapshot, 'path', None):
+ def create_different_sizes_from_snapshot(self, data_key="resolutions"):
+ if not self.snapshot or not getattr(self.snapshot, "path", None):
logger.error(
- f'generate_other_images_from_snapshot document pk:{self.pk}'
- f' failed, no snapshot found! Skip.')
+ f"generate_other_images_from_snapshot document pk:{self.pk}"
+ f" failed, no snapshot found! Skip."
+ )
return
if not os.path.exists(self.snapshot.path):
logger.error(
- f'generate_other_images_from_snapshot document pk:{self.pk} '
- f'failed, snapshot file {self.snapshot.path} does not exist.'
+ f"generate_other_images_from_snapshot document pk:{self.pk} "
+ f"failed, snapshot file {self.snapshot.path} does not exist."
)
return
sizes = create_different_sizes_from_snapshot(
snapshot=self.snapshot.path,
sizes=[
- ('preview', settings.MILLER_SIZES_SNAPSHOT_PREVIEW),
- ('thumbnail', settings.MILLER_SIZES_SNAPSHOT_THUMBNAIL),
- ('medium', settings.MILLER_SIZES_SNAPSHOT_MEDIUM),
+ ("preview", settings.MILLER_SIZES_SNAPSHOT_PREVIEW),
+ ("thumbnail", settings.MILLER_SIZES_SNAPSHOT_THUMBNAIL),
+ ("medium", settings.MILLER_SIZES_SNAPSHOT_MEDIUM),
],
- format='jpg',
+ format="jpg",
data_key=data_key,
- media_url=settings.MEDIA_URL
+ media_url=settings.MEDIA_URL,
)
self.data.update(sizes)
self.save()
@@ -200,17 +189,23 @@ def handle_preview(self, override=False):
"""
Create a preview images according to the settings.
"""
- if not self.snapshot or not getattr(self.snapshot, 'path', None):
- if self.attachment and getattr(self.attachment, 'path', None):
- logger.info(f'handle_preview document pk:{self.pk} try creating snapshot')
+ if not self.snapshot or not getattr(self.snapshot, "path", None):
+ if self.attachment and getattr(self.attachment, "path", None):
+ logger.info(
+ f"handle_preview document pk:{self.pk} try creating snapshot"
+ )
self.create_snapshot_from_attachment()
else:
- logger.info(f'handle_preview document pk:{self.pk} no attachment found.')
+ logger.info(
+ f"handle_preview document pk:{self.pk} no attachment found."
+ )
elif override:
- logger.info(f'handle_preview pk:{self.pk}) creating snapshot...')
+ logger.info(f"handle_preview pk:{self.pk}) creating snapshot...")
self.create_snapshot_from_attachment()
else:
- logger.info(f'handle_preview document pk:{self.pk} skip snapshot generation, snapshot file found')
+ logger.info(
+ f"handle_preview document pk:{self.pk} skip snapshot generation, snapshot file found"
+ )
self.create_different_sizes_from_snapshot()
def update_data_by_type(self):
@@ -227,7 +222,7 @@ def update_data_by_type(self):
for images
"""
if self.type == Document.VIDEO:
- subtitles = get_video_subtitles(path_prefix=f'{self.type}/{self.slug}')
+ subtitles = get_video_subtitles(path_prefix=f"{self.type}/{self.slug}")
self.data.update(subtitles)
self.save()
@@ -246,34 +241,31 @@ def update_search_vector(self, verbose=False):
languages=settings.MILLER_LANGUAGES,
simple_fields=settings.MILLER_VECTORS_SIMPLE_FIELDS,
multilanguage_fields=settings.MILLER_VECTORS_MULTILANGUAGE_FIELDS,
- verbose=verbose
+ verbose=verbose,
)
if not contents:
- logger.error(
- f'update_search_vector failed for document:{self.pk} (empty?)'
- )
+ logger.error(f"update_search_vector failed for document:{self.pk} (empty?)")
return
if verbose:
- logger.info(
- f'VERBOSE - contents: {contents}'
- )
+ logger.info(f"VERBOSE - contents: {contents}")
with connection.cursor() as cursor:
- to_be_executed = ''.join([
- """
+ to_be_executed = "".join(
+ [
+ """
UPDATE miller_document
SET search_vector = x.weighted_tsv FROM (
SELECT id,
""",
- q,
- """
+ q,
+ """
AS weighted_tsv
FROM miller_document
WHERE miller_document.id=%s
) AS x
WHERE x.id = miller_document.id
- """
- ])
- cursor.execute(to_be_executed, [
- value
- for value, w, c in contents
- ] + [self.pk])
+ """,
+ ]
+ )
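+        # params: one value per weighted field in the query, plus the document pk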
+ cursor.execute(
+ to_be_executed, [value for value, w, c in contents] + [self.pk]
+ )
diff --git a/miller/models/tag.py b/miller/models/tag.py
index 2843577..9dd9dbc 100644
--- a/miller/models/tag.py
+++ b/miller/models/tag.py
@@ -4,31 +4,31 @@
class Tag(models.Model):
- KEYWORD = 'keyword' # i.e, no special category at all
- BLOG = 'blog' # items tagged as events are "news"
- HIGHLIGHTS = 'highlights'
- WRITING = 'writing'
- COLLECTION = 'collection'
+ KEYWORD = "keyword" # i.e, no special category at all
+ BLOG = "blog" # items tagged as events are "news"
+ HIGHLIGHTS = "highlights"
+ WRITING = "writing"
+ COLLECTION = "collection"
# things related to publishing activity,
# I.E issue number that can be filtered by
- PUBLISHING = 'publishing'
+ PUBLISHING = "publishing"
CATEGORY_CHOICES = (
- (KEYWORD, 'keyword'),
- (BLOG, 'blog'),
- (HIGHLIGHTS, 'highlights'),
- (WRITING, 'writing'),
- (COLLECTION, 'collection'),
- (PUBLISHING, 'publishing')
+ (KEYWORD, "keyword"),
+ (BLOG, "blog"),
+ (HIGHLIGHTS, "highlights"),
+ (WRITING, "writing"),
+ (COLLECTION, "collection"),
+ (PUBLISHING, "publishing"),
) + settings.MILLER_TAG_CATEGORY_CHOICES
- HIDDEN = 'hidden'
+ HIDDEN = "hidden"
# everyone can access that.
- PUBLIC = 'public'
+ PUBLIC = "public"
STATUS_CHOICES = (
- (HIDDEN, 'keep this hidden'),
- (PUBLIC, 'published tag'),
+ (HIDDEN, "keep this hidden"),
+ (PUBLIC, "published tag"),
)
# e.g. 'Mr. E. Smith'
name = models.CharField(max_length=100)
@@ -36,20 +36,19 @@ class Tag(models.Model):
slug = models.SlugField(max_length=100, unique=True, blank=True)
# e.g. 'actor' or 'institution'
category = models.CharField(
- max_length=32, choices=CATEGORY_CHOICES, default=KEYWORD)
- status = models.CharField(
- max_length=10, choices=STATUS_CHOICES, default=PUBLIC)
+ max_length=32, choices=CATEGORY_CHOICES, default=KEYWORD
+ )
+ status = models.CharField(max_length=10, choices=STATUS_CHOICES, default=PUBLIC)
data = UTF8JSONField(
- verbose_name=u'data contents', help_text='JSON format',
- default=dict, blank=True
+ verbose_name="data contents", help_text="JSON format", default=dict, blank=True
)
def __str__(self):
- return self.slug
+ return f"{self.name} ({self.category})"
class Meta:
- unique_together = ('name', 'category')
+ unique_together = ("name", "category")
def __unicode__(self):
- return f'{self.name}({self.category})'
+ return f"{self.name} ({self.category})"
diff --git a/miller/schema/document/payload.json b/miller/schema/document/payload.json
index 595b6c9..34abd05 100644
--- a/miller/schema/document/payload.json
+++ b/miller/schema/document/payload.json
@@ -4,13 +4,7 @@
"type": "object",
"title": "JSON schema for JSONField:data",
"description": "Basic schema for a data field",
- "required": [
- "type",
- "end_date",
- "start_date",
- "year",
- "title"
- ],
+ "required": ["type", "end_date", "start_date", "year", "title"],
"properties": {
"archive_id": {
"$id": "#/properties/archive_id",
@@ -19,9 +13,7 @@
"description": "An explanation about the purpose of this instance.",
"default": "",
"maxLength": 127,
- "examples": [
- ""
- ]
+ "examples": [""]
},
"copyright": {
"$id": "#/properties/copyright",
@@ -29,9 +21,7 @@
"title": "The Copyright Schema",
"description": "An explanation about the purpose of this instance.",
"default": "",
- "examples": [
- ""
- ]
+ "examples": [""]
},
"provenance": {
"$id": "#/properties/provenance",
@@ -40,9 +30,7 @@
"description": "An explanation about the purpose of this instance.",
"default": "",
"maxLength": 200,
- "examples": [
- "Archiv(es) POST Luxembourg"
- ]
+ "examples": ["Archiv(es) POST Luxembourg"]
},
"type": {
"$id": "#/properties/type",
@@ -50,16 +38,8 @@
"title": "The Type Schema",
"description": "An explanation about the purpose of this instance.",
"default": "",
- "enum": [
- "Photo",
- "Image",
- "Cartoon",
- "Other",
- "Advertising"
- ],
- "examples": [
- "Other"
- ]
+ "enum": ["Photo", "image", "Cartoon", "Other", "Advertising"],
+ "examples": ["Other"]
},
"reference": {
"$id": "#/properties/reference",
@@ -68,9 +48,7 @@
"description": "An explanation about the purpose of this instance.",
"default": "",
"maxLength": 127,
- "examples": [
- ""
- ]
+ "examples": [""]
},
"start_date": {
"$id": "#/properties/start_date",
@@ -82,9 +60,7 @@
"pattern": "^[0-9]{4}-((0[13578]|1[02])-(0[1-9]|[12][0-9]|3[01])|(0[469]|11)-(0[1-9]|[12][0-9]|30)|(02)-(0[1-9]|[12][0-9]))(T(0[0-9]|1[0-9]|2[0-3]):(0[0-9]|[1-5][0-9]):(0[0-9]|[1-5][0-9])(\\.[0-9]{3}Z)?)?$",
"minLength": 1,
"maxLength": 25,
- "examples": [
- "2001-07-03"
- ]
+ "examples": ["2001-07-03"]
},
"end_date": {
"$id": "#/properties/end_date",
@@ -96,9 +72,7 @@
"pattern": "^[0-9]{4}-((0[13578]|1[02])-(0[1-9]|[12][0-9]|3[01])|(0[469]|11)-(0[1-9]|[12][0-9]|30)|(02)-(0[1-9]|[12][0-9]))(T(0[0-9]|1[0-9]|2[0-3]):(0[0-9]|[1-5][0-9]):(0[0-9]|[1-5][0-9])(\\.[0-9]{3}Z)?)?$",
"minLength": 1,
"maxLength": 25,
- "examples": [
- "2001-07-03"
- ]
+ "examples": ["2001-07-03"]
},
"year": {
"$id": "#/properties/year",
@@ -108,9 +82,7 @@
"default": 0,
"minLength": 1,
"maxLength": 20,
- "examples": [
- 2001
- ]
+ "examples": [2001]
},
"download": {
"$id": "#/properties/download",
@@ -118,9 +90,7 @@
"title": "The Download Schema",
"description": "An explanation about the purpose of this instance.",
"default": false,
- "examples": [
- false
- ]
+ "examples": [false]
},
"creator": {
"$id": "#/properties/creator",
@@ -128,9 +98,7 @@
"title": "The Creator Schema",
"description": "An explanation about the purpose of this instance.",
"default": "",
- "examples": [
- "Gaston Bohnenberger et Guy Modert"
- ]
+ "examples": ["Gaston Bohnenberger et Guy Modert"]
},
"title": {
"$id": "#/properties/title",
@@ -138,15 +106,13 @@
"title": "The Title Schema",
"description": "An explanation about the purpose of this instance.",
"default": {},
- "examples": [{
- "de_DE": "Finaler Bericht von Gaston Bohnenberger und Guy Modert zur Restrukturierung von EUTELSAT.",
- "fr_FR": "EUTELSAT - Rapport final de Guy Modert et Gaston Bohnenberger sur la restructuration"
- }],
- "required": [
- "de_DE",
- "fr_FR",
- "en_GB"
+ "examples": [
+ {
+ "de_DE": "Finaler Bericht von Gaston Bohnenberger und Guy Modert zur Restrukturierung von EUTELSAT.",
+ "fr_FR": "EUTELSAT - Rapport final de Guy Modert et Gaston Bohnenberger sur la restructuration"
+ }
],
+ "required": ["de_DE", "fr_FR", "en_GB"],
"format": "translate",
"properties": {
"de_DE": {
@@ -193,15 +159,13 @@
"title": "The Description Schema",
"description": "An explanation about the purpose of this instance.",
"default": {},
- "examples": [{
- "de_DE": "blablalb",
- "fr_FR": "blibli"
- }],
- "required": [
- "de_DE",
- "fr_FR",
- "en_GB"
+ "examples": [
+ {
+ "de_DE": "blablalb",
+ "fr_FR": "blibli"
+ }
],
+ "required": ["de_DE", "fr_FR", "en_GB"],
"format": "translate",
"properties": {
"de_DE": {
@@ -211,9 +175,7 @@
"description": "An explanation about the purpose of this instance.",
"minLength": 1,
"default": "",
- "examples": [
- "blablalb"
- ]
+ "examples": ["blablalb"]
},
"fr_FR": {
"$id": "#/properties/description/properties/fr_FR",
@@ -222,9 +184,7 @@
"description": "An explanation about the purpose of this instance.",
"minLength": 1,
"default": "",
- "examples": [
- "blibli"
- ]
+ "examples": ["blibli"]
},
"en_GB": {
"$id": "#/properties/description/properties/en_GB",
@@ -233,9 +193,7 @@
"description": "An explanation about the purpose of this instance.",
"minLength": 1,
"default": "",
- "examples": [
- "blibli"
- ]
+ "examples": ["blibli"]
}
}
}
diff --git a/miller/settings.py b/miller/settings.py
index e28071a..c9dbb06 100644
--- a/miller/settings.py
+++ b/miller/settings.py
@@ -100,25 +100,20 @@
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
-if sys.argv[1] == "test":
- DATABASES = {
- "default": {
- "ENGINE": "django.db.backends.sqlite3",
- "NAME": os.path.join(BASE_DIR, "db.sqlite3"),
- }
- }
-else:
- DATABASES = {
- "default": {
- "ENGINE": "django.db.backends.postgresql_psycopg2",
- "NAME": get_env_variable("MILLER_DATABASE_NAME"),
- "USER": get_env_variable("MILLER_DATABASE_USER"),
- "PASSWORD": get_env_variable("MILLER_DATABASE_PASSWORD"),
- "HOST": get_env_variable("MILLER_DATABASE_HOST", "localhost"),
- "PORT": get_env_variable("MILLER_DATABASE_PORT", "54320"),
- }
+DATABASES = {
+ "default": {
+ "ENGINE": "django.db.backends.postgresql_psycopg2",
+ "NAME": get_env_variable("MILLER_DATABASE_NAME"),
+ "USER": get_env_variable("MILLER_DATABASE_USER"),
+ "PASSWORD": get_env_variable("MILLER_DATABASE_PASSWORD"),
+ "HOST": get_env_variable("MILLER_DATABASE_HOST", "localhost"),
+ "PORT": get_env_variable("MILLER_DATABASE_PORT", "5432"),
}
+}
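+
+# the test runner swaps in sqlite so tests do not need a running postgres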
+if "test" in sys.argv:
+ DATABASES["default"]["ENGINE"] = "django.db.backends.sqlite3"
+ DATABASES["default"]["TEST"]["NAME"] = ":memory:"
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
@@ -280,7 +275,7 @@
# Celery
REDIS_HOST = get_env_variable("REDIS_HOST", "localhost")
-REDIS_PORT = get_env_variable("REDIS_PORT", "63790")
+REDIS_PORT = get_env_variable("REDIS_PORT", "6379")
CELERY_BROKER_URL = f"redis://{REDIS_HOST}:{REDIS_PORT}/4"
CELERY_RESULT_BACKEND = f"redis://{REDIS_HOST}:{REDIS_PORT}/5"
CELERYD_PREFETCH_MULTIPLIER = 2