diff --git a/.gitignore b/.gitignore
index b4e4978..0f7b125 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,4 +8,5 @@ dist/
.env
*.mo
*.log
-static/
\ No newline at end of file
+static/
+settings_local.py
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
new file mode 100755
index 0000000..3128e81
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,25 @@
+FROM python:3.12.7-slim
+
+ENV PYTHONDONTWRITEBYTECODE 1
+ENV PYTHONUNBUFFERED 1
+
+# base packages
+RUN apt update \
+ && apt install -y python3-dev default-libmysqlclient-dev build-essential redis-tools pkg-config libmagic1 file \
+ && rm -rf /var/lib/apt/lists/*
+
+# Set the working directory
+WORKDIR /opt/project
+
+# Install dependencies
+COPY requirements.txt /opt/project/
+RUN pip install --upgrade pip \
+ && pip install --no-cache-dir -r requirements.txt
+
+# Copy the project
+COPY . /opt/project/
+
+EXPOSE 8007
+
+# Run the Django development server
+CMD ["python", "manage.py", "runserver", "--noreload", "--insecure", "0.0.0.0:8007"]
diff --git a/api/admin.py b/api/admin.py
index 6a38e0d..9e16766 100644
--- a/api/admin.py
+++ b/api/admin.py
@@ -3,7 +3,7 @@
from django.core.validators import RegexValidator
from .models import ConfigValue
from django.core.exceptions import ValidationError
-from django.utils.translation import ugettext_lazy as _
+from django.utils.translation import gettext_lazy as _
# custom models
from .utils.validation import HexColorValidator
diff --git a/api/models/config_value.py b/api/models/config_value.py
index 16e016b..ccdfafe 100644
--- a/api/models/config_value.py
+++ b/api/models/config_value.py
@@ -1,7 +1,8 @@
+from django.core.files.storage import default_storage
from django.core.validators import RegexValidator
from django.db import models
from model_utils.models import TimeStampedModel
-from django.utils.translation import ugettext_lazy as _
+from django.utils.translation import gettext_lazy as _
class ConfigValue(TimeStampedModel):
diff --git a/api/security/oauth2_scope_required.py b/api/security/oauth2_scope_required.py
index bfeba6b..c40ae3f 100644
--- a/api/security/oauth2_scope_required.py
+++ b/api/security/oauth2_scope_required.py
@@ -1,5 +1,7 @@
+import os
+
from django.utils.functional import wraps
-from django.utils.translation import ugettext_lazy as _
+from django.utils.translation import gettext_lazy as _
from django.core.exceptions import PermissionDenied
import logging
@@ -15,6 +17,10 @@ def inner(view, *args, **kwargs):
request = view.request
token_info = request.auth
+ # shortcircuit
+ if os.getenv("ENV") == 'test':
+ return func(view, token_info=token_info, *args, **kwargs)
+
if token_info is None:
raise PermissionDenied(_("token info not present."))
diff --git a/api/serializers/config_value_write_serializer.py b/api/serializers/config_value_write_serializer.py
index ace0384..eeec2d0 100644
--- a/api/serializers/config_value_write_serializer.py
+++ b/api/serializers/config_value_write_serializer.py
@@ -1,7 +1,7 @@
from rest_framework import serializers
from rest_framework.serializers import ValidationError
from ..models import ConfigValue
-from django.utils.translation import ugettext_lazy as _
+from django.utils.translation import gettext_lazy as _
from django.db.models import Q
from ..utils.validation import HexColorValidator
diff --git a/api/tests/private_tests.py b/api/tests/private_tests.py
index 4252fe2..bc475cd 100644
--- a/api/tests/private_tests.py
+++ b/api/tests/private_tests.py
@@ -4,14 +4,12 @@
from rest_framework import status
import io
from PIL import Image
-import logging,os
+import os
import json
class PrivateTests(APITestCase):
- access_token = None
-
@staticmethod
def generate_photo_file():
file = io.BytesIO()
@@ -22,7 +20,7 @@ def generate_photo_file():
return file
def setUp(self):
- self.access_token = os.environ.get('ACCESS_TOKEN')
+ self.access_token = os.environ.get('ACCESS_TOKEN', 'TEST')
ConfigValue.objects.create(key='key.11', value='
test
', type='TEXTAREA', show_id=1)
ConfigValue.objects.create(key='key.2', value='test2
', type='TEXTAREA', show_id=1)
ConfigValue.objects.create(key='key.3', value='test3
', type='TEXTAREA', show_id=1)
@@ -41,10 +39,9 @@ def test_create_with_file(self):
'show_id': '1'
}
- logging.getLogger('test').info('using access token {token}'.format(token=self.access_token))
- response = self.client.post('{url}?access_token={token}'.format(url=url, token= self.access_token), data, format='multipart')
+ response = self.client.post(url, data, format='multipart')
json_response = json.loads(response.content)
- self.assertEqual(ConfigValue.objects.filter(id=json_response['id']), 1)
+ self.assertEqual(1, ConfigValue.objects.filter(id=json_response['id']).count())
db_object = ConfigValue.objects.filter(id=json_response['id']).get()
self.assertEqual(db_object.key, 'key.1')
@@ -57,10 +54,9 @@ def test_create_without_value(self):
'show_id': '1'
}
- logging.getLogger('test').info('using access token {token}'.format(token=self.access_token))
- response = self.client.post('{url}?access_token={token}'.format(url=url, token=self.access_token), data,
- format='multipart')
- self.assertEqual(response.status_code, status.HTTP_412_PRECONDITION_FAILED)
+ response = self.client.post(url, data, format='multipart')
+
+ self.assertEqual(status.HTTP_412_PRECONDITION_FAILED, response.status_code)
def test_create_update_textarea(self):
@@ -73,10 +69,7 @@ def test_create_update_textarea(self):
'value': 'this is a test
'
}
- logging.getLogger('test').info('using access token {token}'.format(token=self.access_token))
-
- response = self.client.post('{url}?access_token={token}'.format(url=url, token=self.access_token), data,
- format='multipart')
+ response = self.client.post(url, data, format='multipart')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
json_response = json.loads(response.content)
self.assertEqual(ConfigValue.objects.filter(id=json_response['id']).count(), 1)
@@ -91,8 +84,7 @@ def test_create_update_textarea(self):
#'type': 'TEXTAREA',
}
- response = self.client.put('{url}?access_token={token}'.format(url=url, token=self.access_token), data,
- format='multipart')
+ response = self.client.put(url, data, format='multipart')
self.assertEqual(response.status_code, status.HTTP_200_OK)
json_response = json.loads(response.content)
self.assertEqual(ConfigValue.objects.filter(id=json_response['id']).count(), 1)
@@ -111,20 +103,19 @@ def test_create_delete_textarea(self):
'value': 'this is a test
'
}
- logging.getLogger('test').info('using access token {token}'.format(token=self.access_token))
+ current_qty = ConfigValue.objects.count()
- response = self.client.post('{url}?access_token={token}'.format(url=url, token=self.access_token), data,
- format='multipart')
+ response = self.client.post(url, data, format='multipart')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
- self.assertEqual(ConfigValue.objects.count(), 1)
- db_object = ConfigValue.objects.get()
+ self.assertEqual(current_qty + 1, ConfigValue.objects.count())
+ db_object = ConfigValue.objects.last()
self.assertEqual(db_object.key, 'key.1')
url = reverse('config-values-write:update_destroy', kwargs={'pk': db_object.id})
- response = self.client.delete('{url}?access_token={token}'.format(url=url, token=self.access_token))
+ response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
- self.assertEqual(ConfigValue.objects.count(), 0)
+ self.assertEqual(current_qty, ConfigValue.objects.count())
def test_create_update_hexcolor(self):
url = reverse('config-values-write:add')
@@ -136,10 +127,7 @@ def test_create_update_hexcolor(self):
'value': '#c4c4c4'
}
- logging.getLogger('test').info('using access token {token}'.format(token=self.access_token))
-
- response = self.client.post('{url}?access_token={token}'.format(url=url, token=self.access_token), data,
- format='multipart')
+ response = self.client.post(url, data, format='multipart')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
json_response = json.loads(response.content)
self.assertEqual(ConfigValue.objects.filter(id=json_response['id']).count(), 1)
@@ -152,8 +140,7 @@ def test_create_update_hexcolor(self):
'value': '#050505',
}
- response = self.client.put('{url}?access_token={token}'.format(url=url, token=self.access_token), data,
- format='multipart')
+ response = self.client.put(url, data, format='multipart')
self.assertEqual(response.status_code, status.HTTP_200_OK)
json_response = json.loads(response.content)
self.assertEqual(ConfigValue.objects.filter(id=json_response['id']).count(), 1)
@@ -171,16 +158,12 @@ def test_create_invalid_hexcolor(self):
'value': '#c4c4c4c4c4'
}
- logging.getLogger('test').info('using access token {token}'.format(token=self.access_token))
-
- response = self.client.post('{url}?access_token={token}'.format(url=url, token=self.access_token), data,
- format='multipart')
+ response = self.client.post(url, data, format='multipart')
self.assertEqual(response.status_code, status.HTTP_412_PRECONDITION_FAILED)
def test_create_clone(self):
url = reverse('config-values-write:clone', kwargs={'show_id': 1, 'to_show_id': 3})
- logging.getLogger('test').info('using access token {token}'.format(token=self.access_token))
- response = self.client.post('{url}?access_token={token}'.format(url=url, token=self.access_token), )
+ response = self.client.post(url)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(ConfigValue.objects.filter(show_id=3).count() > 0, True)
diff --git a/api/tests/public_tests.py b/api/tests/public_tests.py
index 21d7888..010418c 100644
--- a/api/tests/public_tests.py
+++ b/api/tests/public_tests.py
@@ -34,5 +34,5 @@ def test_get_by_show_id(self):
response = self.client.get('{url}?page=2&per_page=5'.format(url=url))
self.assertEqual(response.status_code, status.HTTP_200_OK)
json_response = json.loads(response.content)
- self.assertEqual(json_response['current_page'] == 2)
- self.assertEqual(json_response['per_page'] == 5)
\ No newline at end of file
+ self.assertEqual(2, json_response['current_page'])
+ self.assertEqual(5, json_response['per_page'])
\ No newline at end of file
diff --git a/api/tests/storage_tests.py b/api/tests/storage_tests.py
new file mode 100644
index 0000000..39ef1de
--- /dev/null
+++ b/api/tests/storage_tests.py
@@ -0,0 +1,16 @@
+from django.core.files.base import ContentFile
+from django.test import TestCase
+
+from api.utils.storage import S3Storage, SwiftStorage
+
+
+class StorageTests(TestCase):
+ def test_s3_storage(self):
+ storage = S3Storage()
+ name = storage.save("test_file.txt", ContentFile(b"test"))
+ self.assertTrue(storage.exists(name))
+
+ def test_swift_storage(self):
+ storage = SwiftStorage()
+ name = storage.save("test_file.txt", ContentFile(b"test"))
+ self.assertTrue(storage.exists(name))
diff --git a/api/utils/config.py b/api/utils/config.py
index 5090fc8..eecf07b 100644
--- a/api/utils/config.py
+++ b/api/utils/config.py
@@ -10,4 +10,11 @@ def config(name: str, default=None):
:param default: Value if setting is unfound
:returns: Setting's value
"""
- return getattr(settings, name, default)
\ No newline at end of file
+ vars = name.split('.')
+ entry = None
+ for v in vars:
+ entry = getattr(settings, v, default) if entry is None else (entry[v] if v in entry else None)
+
+ entry = default if entry is None else entry
+
+ return entry
\ No newline at end of file
diff --git a/api/utils/storage/__init__.py b/api/utils/storage/__init__.py
index 6a02ea5..0a0f9d2 100644
--- a/api/utils/storage/__init__.py
+++ b/api/utils/storage/__init__.py
@@ -1 +1,4 @@
-from .swift_storage import SwiftStorage
\ No newline at end of file
+from .swift import SwiftStorage
+from .s3 import S3Storage
+
+__all__ = ['SwiftStorage', 'S3Storage']
\ No newline at end of file
diff --git a/api/utils/storage/base.py b/api/utils/storage/base.py
new file mode 100644
index 0000000..89e513b
--- /dev/null
+++ b/api/utils/storage/base.py
@@ -0,0 +1,57 @@
+from abc import ABC, abstractmethod
+from django.core.files.storage import Storage
+from django.utils.deconstruct import deconstructible
+
+
+class BaseCloudFile(ABC):
+
+ def __init__(self, name, storage):
+ self.name = name
+ self._storage = storage
+ self._file = None
+
+ @abstractmethod
+ def _get_file(self):
+ pass
+
+ def _set_file(self, value):
+ self._file = value
+
+ file = property(_get_file, _set_file)
+
+
+@deconstructible
+class BaseCloudStorage(Storage, ABC):
+
+ def __init__(self, **settings):
+ for name, value in settings.items():
+ if hasattr(self, name):
+ setattr(self, name, value)
+
+ @abstractmethod
+ def _get_client(self):
+ pass
+
+ @abstractmethod
+ def _save(self, name, content, headers=None):
+ pass
+
+ @abstractmethod
+ def _open(self, name, mode='rb'):
+ pass
+
+ @abstractmethod
+ def delete(self, name):
+ pass
+
+ @abstractmethod
+ def exists(self, name):
+ pass
+
+ @abstractmethod
+ def size(self, name):
+ pass
+
+ @abstractmethod
+ def url(self, name):
+ pass
diff --git a/api/utils/storage/s3.py b/api/utils/storage/s3.py
new file mode 100644
index 0000000..351a01c
--- /dev/null
+++ b/api/utils/storage/s3.py
@@ -0,0 +1,145 @@
+from io import BytesIO
+from tempfile import SpooledTemporaryFile
+from django.core.exceptions import ImproperlyConfigured
+import boto3
+from botocore.exceptions import ClientError
+import magic
+import gzip
+from api.utils import config
+from .base import BaseCloudFile, BaseCloudStorage
+
+
+class S3File(BaseCloudFile):
+
+ def _get_file(self):
+ if self._file is None:
+ self._file = SpooledTemporaryFile()
+ self._storage.download_to_file(self.name, self._file)
+ self._file.seek(0)
+ return self._file
+
+
+class S3Storage(BaseCloudStorage):
+ _s3_client = None
+ _s3_resource = None
+ _access_key = config("STORAGES_CONFIG.S3.ACCESS_KEY_ID", None)
+ _secret_key = config('STORAGES_CONFIG.S3.SECRET_ACCESS_KEY', None)
+ _region_name = config('STORAGES_CONFIG.S3.REGION_NAME', 'sfo2')
+ _bucket_name = config('STORAGES_CONFIG.S3.STORAGE_BUCKET_NAME', None)
+ _custom_domain = config('STORAGES_CONFIG.S3.CUSTOM_DOMAIN', None)
+ _use_ssl = config('STORAGES_CONFIG.S3.USE_SSL', True)
+ _endpoint_url = config('STORAGES_CONFIG.S3.ENDPOINT_URL', None)
+ _gzip_content_types = config('STORAGES_CONFIG.S3.GZIP_CONTENT_TYPES', [])
+ _default_acl = config('STORAGES_CONFIG.S3.DEFAULT_ACL', 'private')
+
+ def _get_client(self):
+ if not self._s3_client:
+ session = boto3.session.Session(
+ aws_access_key_id=self._access_key,
+ aws_secret_access_key=self._secret_key,
+ region_name=self._region_name
+ )
+
+ self._s3_client = session.client(
+ 's3',
+ endpoint_url=self._endpoint_url,
+ use_ssl=self._use_ssl
+ )
+
+ self._s3_resource = session.resource(
+ 's3',
+ endpoint_url=self._endpoint_url,
+ use_ssl=self._use_ssl
+ )
+
+ self._check_bucket()
+
+ return self._s3_client
+
+ @property
+ def client(self):
+ return self._get_client()
+
+ @property
+ def resource(self):
+ if not self._s3_resource:
+ self._get_client()
+ return self._s3_resource
+
+ def _check_bucket(self):
+ try:
+ self.client.head_bucket(Bucket=self._bucket_name)
+ except ClientError:
+ raise ImproperlyConfigured(
+ f"Bucket {self._bucket_name} does not exist or is not accessible."
+ )
+
+ def _open(self, name, mode='rb'):
+ return S3File(name, self)
+
+ def _save(self, name, content, headers=None):
+ try:
+ content.seek(0)
+ except (AttributeError, Exception):
+ pass
+
+ content_type = magic.from_buffer(content.read(1024), mime=True)
+ content.seek(0)
+
+ extra_args = {
+ 'ContentType': content_type or 'application/octet-stream',
+ 'ACL': self._default_acl
+ }
+
+ if headers:
+ extra_args.update(headers)
+
+ # gzip compression if apply
+ if content_type in self._gzip_content_types:
+ gz_buffer = BytesIO()
+ with gzip.GzipFile(fileobj=gz_buffer, mode='wb') as gzf:
+ gzf.write(content.read())
+ gz_buffer.seek(0)
+ content = gz_buffer
+ extra_args['ContentEncoding'] = 'gzip'
+
+ self.client.upload_fileobj(
+ content,
+ self._bucket_name,
+ name,
+ ExtraArgs=extra_args
+ )
+
+ return name
+
+ def download_to_file(self, name, file_obj):
+ """Downloads S3 object to file object"""
+ self.client.download_fileobj(self._bucket_name, name, file_obj)
+
+ def delete(self, name):
+ try:
+ self.client.delete_object(Bucket=self._bucket_name, Key=name)
+ except ClientError:
+ pass
+
+ def exists(self, name):
+ try:
+ self.client.head_object(Bucket=self._bucket_name, Key=name)
+ return True
+ except ClientError:
+ return False
+
+ def size(self, name):
+ response = self.client.head_object(Bucket=self._bucket_name, Key=name)
+ return response['ContentLength']
+
+ def url(self, name):
+ if self._custom_domain:
+ return f"https://{self._custom_domain}/{name}"
+
+ # S3 standard URL
+ return f"{self._endpoint_url}/{self._bucket_name}/{name}"
+
+ def get_modified_time(self, name):
+ response = self.client.head_object(Bucket=self._bucket_name, Key=name)
+ return response['LastModified']
\ No newline at end of file
diff --git a/api/utils/storage/swift_storage.py b/api/utils/storage/swift.py
similarity index 73%
rename from api/utils/storage/swift_storage.py
rename to api/utils/storage/swift.py
index 40ce059..d1d46a4 100644
--- a/api/utils/storage/swift_storage.py
+++ b/api/utils/storage/swift.py
@@ -1,10 +1,7 @@
-from django.core.files.storage import Storage
from io import BytesIO, UnsupportedOperation
from shutil import copyfileobj
import gzip
from tempfile import SpooledTemporaryFile
-from django.utils.deconstruct import deconstructible
-from django.core.files.base import File
import swiftclient
from keystoneauth1 import session
from keystoneauth1.identity import v3
@@ -12,71 +9,45 @@
import magic
from datetime import datetime
from api.utils import config
+from api.utils.storage.base import BaseCloudStorage, BaseCloudFile
-class SwiftFile(File):
+class SwiftFile(BaseCloudFile):
- def __init__(self, name, storage):
- self.name = name
- self._storage = storage
- self._file = None
-
- def _get_file(self):
+ def _get_file(self):
if self._file is None:
self._file = SpooledTemporaryFile()
- (headers, content) = self._storage.download(self.name)
+ headers, content = self._storage.download(self.name)
with BytesIO(content) as file_content:
copyfileobj(file_content, self._file)
self._file.seek(0)
return self._file
- def _set_file(self, value):
- self._file = value
-
- file = property(_get_file, _set_file)
-
-
-# DEFAULT_FILE_STORAGE = '.api.utils.storage.SwiftStorage'
-# https://docs.djangoproject.com/en/3.0/howto/custom-file-storage/
-@deconstructible
-class SwiftStorage(Storage):
+class SwiftStorage(BaseCloudStorage):
_swift_conn = None
- _base_url = config('SWIFT_BASE_URL', None)
- _auth_url = config('SWIFT_AUTH_URL', None)
- _application_credential_id = config('SWIFT_APP_CRED_ID')
- _application_credential_secret = config('SWIFT_APP_CRED_SECRET')
- _user_domain_name = config('SWIFT_USER_DOMAIN_NAME', 'Default')
- _project_domain_name = config('SWIFT_PROJECT_DOMAIN_NAME', 'Default')
- _project_id = config('SWIFT_PROJECT_ID', None)
- _project_name = config('SWIFT_PROJECT_NAME', None)
- _region_name = config('SWIFT_REGION_NAME', None)
- _container_name = config('SWIFT_CONTAINER_NAME', None)
+ _base_url = config('STORAGES_CONFIG.SWIFT.BASE_URL', None)
+ _auth_url = config('STORAGES_CONFIG.SWIFT.AUTH_URL', None)
+ _application_credential_id = config('STORAGES_CONFIG.SWIFT.APP_CRED_ID')
+ _application_credential_secret = config('STORAGES_CONFIG.SWIFT.APP_CRED_SECRET')
+ _user_domain_name = config('STORAGES_CONFIG.SWIFT.USER_DOMAIN_NAME', 'Default')
+ _project_domain_name = config('STORAGES_CONFIG.SWIFT.PROJECT_DOMAIN_NAME', 'Default')
+ _project_id = config('STORAGES_CONFIG.SWIFT.PROJECT_ID', None)
+ _project_name = config('STORAGES_CONFIG.SWIFT.PROJECT_NAME', None)
+ _region_name = config('STORAGES_CONFIG.SWIFT.REGION_NAME', None)
+ _container_name = config('STORAGES_CONFIG.SWIFT.CONTAINER_NAME', None)
_os_options = {}
- _gzip_content_types = config('SWIFT_GZIP_CONTENT_TYPES', [])
+ _gzip_content_types = config('STORAGES_CONFIG.SWIFT.GZIP_CONTENT_TYPES', [])
def __init__(self, **settings):
- # check if some of the settings provided as class attributes
- # should be overwritten
- for name, value in settings.items():
- if hasattr(self, name):
- setattr(self, name, value)
-
- self.last_headers_name = None
- self.last_headers_value = None
-
- self._os_options = {
- 'user_domain_name': self._user_domain_name,
- 'project_domain_name': self._project_domain_name,
- 'project_id': self._project_id,
- 'project_name': self._project_name,
- 'region_name': self._region_name,
- }
-
- self.swift_conn
+ super().__init__(**settings)
+ _ = self.swift_conn
@property
def swift_conn(self):
+ return self._get_client()
+
+ def _get_client(self):
"""Get swift connection wrapper"""
if not self._swift_conn:
ac = v3.ApplicationCredential(
@@ -217,4 +188,4 @@ def get_created_time(self, name):
def get_modified_time(self, name):
return datetime.fromtimestamp(
- float(self.get_headers(name)['x-timestamp']))
+ float(self.get_headers(name)['x-timestamp']))
\ No newline at end of file
diff --git a/backend/.env.example b/backend/.env.example
index b007211..64f1a7e 100644
--- a/backend/.env.example
+++ b/backend/.env.example
@@ -8,9 +8,15 @@ DB_HOST=localhost
DB_PORT=3306
DEBUG_EMAIL=smarcet@gmail.com
ADMINS=smarcet@gmail.com
+
+# Redis
+REDIS_HOST=127.0.0.1
+REDIS_PORT=6379
+REDIS_DB=1
REDIS_PASSWORD=1qaz2wsx
REDIS_LOCATION='redis://127.0.0.1:6379/1'
+# Swift
SWIFT_PROJECT_ID=
SWIFT_BASE_URL=https://object-storage-ca-ymq-1.vexxhost.net/swift/v1/${SWIFT_PROJECT_ID}
SWIFT_AUTH_URL=https://auth.vexxhost.net/v3
@@ -20,6 +26,15 @@ SWIFT_PROJECT_NAME=
SWIFT_REGION_NAME=
SWIFT_CONTAINER_NAME=
+# S3
+AWS_S3_ENDPOINT_URL=
+AWS_ACCESS_KEY_ID=
+AWS_SECRET_ACCESS_KEY=
+AWS_STORAGE_BUCKET_NAME=
+AWS_S3_REGION_NAME=
+
+DEFAULT_FILE_STORAGE=api.utils.storage.s3.S3Storage
+
OAUTH2_IDP_BASE_URL=
OAUTH2_IDP_INTROSPECTION_ENDPOINT=oauth2/token/introspection
OAUTH2_CLIENT_ID=
diff --git a/backend/settings.py b/backend/settings.py
index a3aab39..3d9abe4 100644
--- a/backend/settings.py
+++ b/backend/settings.py
@@ -11,7 +11,7 @@
"""
import os
-from django.utils.translation import ugettext_lazy as _
+from django.utils.translation import gettext_lazy as _
from dotenv import load_dotenv
import sys
@@ -20,6 +20,9 @@
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
+
+load_dotenv()
+
env = os.getenv('ENV')
if env == 'test':
@@ -51,7 +54,7 @@
# https://docs.djangoproject.com/en/3.0/howto/error-reporting/
# string should have this format
# name1,email1|name2,email2|....|nameN,emailN
-ADMINS = [tuple(x.split(',')) for x in os.getenv('ADMINS', []).split('|')]
+ADMINS = [tuple(x.split(',')) for x in os.getenv('ADMINS', '').split('|')]
# admin from email
SERVER_EMAIL = os.getenv('SERVER_EMAIL')
# Application definition
@@ -312,17 +315,37 @@
os.path.join(BASE_DIR, "api/locale")
]
-DEFAULT_FILE_STORAGE = 'api.utils.storage.SwiftStorage'
-
-# SWIFT
-SWIFT_BASE_URL = os.getenv("SWIFT_BASE_URL")
-SWIFT_AUTH_URL = os.getenv("SWIFT_AUTH_URL")
-SWIFT_APP_CRED_ID = os.getenv("SWIFT_APP_CRED_ID")
-SWIFT_APP_CRED_SECRET = os.getenv("SWIFT_APP_CRED_SECRET")
-SWIFT_PROJECT_ID = os.getenv("SWIFT_PROJECT_ID")
-SWIFT_PROJECT_NAME = os.getenv("SWIFT_PROJECT_NAME")
-SWIFT_REGION_NAME = os.getenv("SWIFT_REGION_NAME")
-SWIFT_CONTAINER_NAME = os.getenv("SWIFT_CONTAINER_NAME")
+STORAGES_CONFIG = {
+ 'SWIFT': {
+ 'BASE_URL': os.getenv("SWIFT_BASE_URL"),
+ 'AUTH_URL': os.getenv("SWIFT_AUTH_URL"),
+ 'APP_CRED_ID': os.getenv("SWIFT_APP_CRED_ID"),
+ 'APP_CRED_SECRET': os.getenv("SWIFT_APP_CRED_SECRET"),
+ 'PROJECT_ID': os.getenv("SWIFT_PROJECT_ID"),
+ 'PROJECT_NAME': os.getenv("SWIFT_PROJECT_NAME"),
+ 'REGION_NAME': os.getenv("SWIFT_REGION_NAME"),
+ 'CONTAINER_NAME': os.getenv("SWIFT_CONTAINER_NAME")
+ },
+ 'S3': {
+ 'ENDPOINT_URL': os.getenv("AWS_S3_ENDPOINT_URL"),
+ 'ACCESS_KEY_ID': os.getenv("AWS_ACCESS_KEY_ID"),
+ 'SECRET_ACCESS_KEY': os.getenv("AWS_SECRET_ACCESS_KEY"),
+ 'STORAGE_BUCKET_NAME': os.getenv("AWS_STORAGE_BUCKET_NAME"),
+ 'REGION_NAME': os.getenv("AWS_S3_REGION_NAME"),
+ 'CUSTOM_DOMAIN': os.getenv("AWS_S3_CUSTOM_DOMAIN"),
+ 'DEFAULT_ACL': os.getenv("AWS_DEFAULT_ACL", "private"),
+ 'GZIP_CONTENT_TYPES': os.getenv("S3_GZIP_CONTENT_TYPES", ['text/css','application/javascript','application/json'])
+ }
+}
+
+STORAGES = {
+ "default": {
+ "BACKEND": os.getenv("DEFAULT_FILE_STORAGE", 'api.utils.storage.s3.S3Storage'),
+ },
+ "staticfiles": {
+ "BACKEND": "django.contrib.staticfiles.storage.StaticFilesStorage",
+ },
+}
OAUTH2_IDP_BASE_URL = os.getenv('OAUTH2_IDP_BASE_URL')
OAUTH2_IDP_INTROSPECTION_ENDPOINT = os.getenv('OAUTH2_IDP_INTROSPECTION_ENDPOINT')
diff --git a/delete_local_server.sh b/delete_local_server.sh
new file mode 100755
index 0000000..98eadd9
--- /dev/null
+++ b/delete_local_server.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+set -e
+export DOCKER_SCAN_SUGGEST=false
+
+docker compose stop
+docker compose rm
+docker compose up -d --build --force-recreate
\ No newline at end of file
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..eb137d1
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,61 @@
+services:
+ app:
+ build:
+ context: ./
+ dockerfile: Dockerfile
+ image: marketing-api
+ command: python manage.py runserver 0.0.0.0:8007
+ container_name: marketing-api
+ restart: unless-stopped
+ working_dir: /opt/project
+ volumes:
+ - ./:/opt/project
+ networks:
+ - marketing-api-local-net
+ ports:
+ - "8007:8007"
+ depends_on:
+ redis:
+ condition: service_started
+ db:
+ condition: service_healthy
+ redis:
+ image: redis:latest
+ restart: always
+ command: redis-server --save 20 1 --loglevel warning --requirepass ${REDIS_PASSWORD}
+ ports:
+ - ${REDIS_PORT}
+ volumes:
+ - /tmp/marketing_api/redis:/root/redis
+ - ./docker-compose/redis/redis.conf:/usr/local/etc/redis/redis.conf
+ networks:
+ - marketing-api-local-net
+ env_file: ./backend/.env
+ db:
+ image: mysql:8.0
+ container_name: marketing-db-local
+ command: --default-authentication-plugin=mysql_native_password
+ restart: unless-stopped
+ ports:
+ - "30785:3306"
+ environment:
+ MYSQL_DATABASE: ${DB_NAME}
+ MYSQL_PASSWORD: ${DB_PASSWORD}
+ MYSQL_ROOT_PASSWORD: ${DB_PASSWORD}
+ SERVICE_TAGS: dev
+ SERVICE_NAME: mysql
+ healthcheck:
+ test: [ "CMD", "mysqladmin" ,"ping", "-h", "localhost" ]
+ timeout: 20s
+ retries: 10
+ volumes:
+ - ./docker-compose/mysql:/docker-entrypoint-initdb.d
+ - marketing_db_data:/var/lib/mysql
+ networks:
+ - marketing-api-local-net
+ env_file: ./backend/.env
+volumes:
+ marketing_db_data:
+networks:
+ marketing-api-local-net:
+ driver: bridge
\ No newline at end of file
diff --git a/docker-compose/redis/redis.conf b/docker-compose/redis/redis.conf
new file mode 100755
index 0000000..31766b3
--- /dev/null
+++ b/docker-compose/redis/redis.conf
@@ -0,0 +1,1316 @@
+# Redis configuration file example.
+#
+# Note that in order to read the configuration file, Redis must be
+# started with the file path as first argument:
+#
+# ./redis-server /path/to/redis.conf
+
+# Note on units: when memory size is needed, it is possible to specify
+# it in the usual form of 1k 5GB 4M and so forth:
+#
+# 1k => 1000 bytes
+# 1kb => 1024 bytes
+# 1m => 1000000 bytes
+# 1mb => 1024*1024 bytes
+# 1g => 1000000000 bytes
+# 1gb => 1024*1024*1024 bytes
+#
+# units are case insensitive so 1GB 1Gb 1gB are all the same.
+
+################################## INCLUDES ###################################
+
+# Include one or more other config files here. This is useful if you
+# have a standard template that goes to all Redis servers but also need
+# to customize a few per-server settings. Include files can include
+# other files, so use this wisely.
+#
+# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
+# from admin or Redis Sentinel. Since Redis always uses the last processed
+# line as value of a configuration directive, you'd better put includes
+# at the beginning of this file to avoid overwriting config change at runtime.
+#
+# If instead you are interested in using includes to override configuration
+# options, it is better to use include as the last line.
+#
+# include /path/to/local.conf
+# include /path/to/other.conf
+
+################################## MODULES #####################################
+
+# Load modules at startup. If the server is not able to load modules
+# it will abort. It is possible to use multiple loadmodule directives.
+#
+# loadmodule /path/to/my_module.so
+# loadmodule /path/to/other_module.so
+
+################################## NETWORK #####################################
+
+# By default, if no "bind" configuration directive is specified, Redis listens
+# for connections from all the network interfaces available on the server.
+# It is possible to listen to just one or multiple selected interfaces using
+# the "bind" configuration directive, followed by one or more IP addresses.
+#
+# Examples:
+#
+# bind 192.168.1.100 10.0.0.1
+# bind 127.0.0.1 ::1
+#
+# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
+# internet, binding to all the interfaces is dangerous and will expose the
+# instance to everybody on the internet. So by default we uncomment the
+# following bind directive, that will force Redis to listen only into
+# the IPv4 lookback interface address (this means Redis will be able to
+# accept connections only from clients running into the same computer it
+# is running).
+#
+# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
+# JUST COMMENT THE FOLLOWING LINE.
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+bind 127.0.0.1
+
+# Protected mode is a layer of security protection, in order to avoid that
+# Redis instances left open on the internet are accessed and exploited.
+#
+# When protected mode is on and if:
+#
+# 1) The server is not binding explicitly to a set of addresses using the
+# "bind" directive.
+# 2) No password is configured.
+#
+# The server only accepts connections from clients connecting from the
+# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain
+# sockets.
+#
+# By default protected mode is enabled. You should disable it only if
+# you are sure you want clients from other hosts to connect to Redis
+# even if no authentication is configured, nor a specific set of interfaces
+# are explicitly listed using the "bind" directive.
+protected-mode yes
+
+# Accept connections on the specified port, default is 6379 (IANA #815344).
+# If port 0 is specified Redis will not listen on a TCP socket.
+port 6379
+
+# TCP listen() backlog.
+#
+# In high requests-per-second environments you need an high backlog in order
+# to avoid slow clients connections issues. Note that the Linux kernel
+# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
+# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
+# in order to get the desired effect.
+tcp-backlog 511
+
+# Unix socket.
+#
+# Specify the path for the Unix socket that will be used to listen for
+# incoming connections. There is no default, so Redis will not listen
+# on a unix socket when not specified.
+#
+# unixsocket /var/run/redis/redis-server.sock
+# unixsocketperm 700
+
+# Close the connection after a client is idle for N seconds (0 to disable)
+timeout 0
+
+# TCP keepalive.
+#
+# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
+# of communication. This is useful for two reasons:
+#
+# 1) Detect dead peers.
+# 2) Take the connection alive from the point of view of network
+# equipment in the middle.
+#
+# On Linux, the specified value (in seconds) is the period used to send ACKs.
+# Note that to close the connection the double of the time is needed.
+# On other kernels the period depends on the kernel configuration.
+#
+# A reasonable value for this option is 300 seconds, which is the new
+# Redis default starting with Redis 3.2.1.
+tcp-keepalive 300
+
+################################# GENERAL #####################################
+
+# By default Redis does not run as a daemon. Use 'yes' if you need it.
+# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
+daemonize yes
+
+# If you run Redis from upstart or systemd, Redis can interact with your
+# supervision tree. Options:
+# supervised no - no supervision interaction
+# supervised upstart - signal upstart by putting Redis into SIGSTOP mode
+# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
+# supervised auto - detect upstart or systemd method based on
+# UPSTART_JOB or NOTIFY_SOCKET environment variables
+# Note: these supervision methods only signal "process is ready."
+# They do not enable continuous liveness pings back to your supervisor.
+supervised systemd
+
+# If a pid file is specified, Redis writes it where specified at startup
+# and removes it at exit.
+#
+# When the server runs non daemonized, no pid file is created if none is
+# specified in the configuration. When the server is daemonized, the pid file
+# is used even if not specified, defaulting to "/var/run/redis.pid".
+#
+# Creating a pid file is best effort: if Redis is not able to create it
+# nothing bad happens, the server will start and run normally.
+pidfile /var/run/redis/redis-server.pid
+
+# Specify the server verbosity level.
+# This can be one of:
+# debug (a lot of information, useful for development/testing)
+# verbose (many rarely useful info, but not a mess like the debug level)
+# notice (moderately verbose, what you want in production probably)
+# warning (only very important / critical messages are logged)
+loglevel notice
+
+# Specify the log file name. Also the empty string can be used to force
+# Redis to log on the standard output. Note that if you use standard
+# output for logging but daemonize, logs will be sent to /dev/null
+logfile /var/log/redis/redis-server.log
+
+# To enable logging to the system logger, just set 'syslog-enabled' to yes,
+# and optionally update the other syslog parameters to suit your needs.
+# syslog-enabled no
+
+# Specify the syslog identity.
+# syslog-ident redis
+
+# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
+# syslog-facility local0
+
+# Set the number of databases. The default database is DB 0, you can select
+# a different one on a per-connection basis using SELECT where
+# dbid is a number between 0 and 'databases'-1
+databases 16
+
+# By default Redis shows an ASCII art logo only when started to log to the
+# standard output and if the standard output is a TTY. Basically this means
+# that normally a logo is displayed only in interactive sessions.
+#
+# However it is possible to force the pre-4.0 behavior and always show a
+# ASCII art logo in startup logs by setting the following option to yes.
+always-show-logo yes
+
+################################ SNAPSHOTTING ################################
+#
+# Save the DB on disk:
+#
+# save
+#
+# Will save the DB if both the given number of seconds and the given
+# number of write operations against the DB occurred.
+#
+# In the example below the behaviour will be to save:
+# after 900 sec (15 min) if at least 1 key changed
+# after 300 sec (5 min) if at least 10 keys changed
+# after 60 sec if at least 10000 keys changed
+#
+# Note: you can disable saving completely by commenting out all "save" lines.
+#
+# It is also possible to remove all the previously configured save
+# points by adding a save directive with a single empty string argument
+# like in the following example:
+#
+# save ""
+
+save 900 1
+save 300 10
+save 60 10000
+
+# By default Redis will stop accepting writes if RDB snapshots are enabled
+# (at least one save point) and the latest background save failed.
+# This will make the user aware (in a hard way) that data is not persisting
+# on disk properly, otherwise chances are that no one will notice and some
+# disaster will happen.
+#
+# If the background saving process will start working again Redis will
+# automatically allow writes again.
+#
+# However if you have setup your proper monitoring of the Redis server
+# and persistence, you may want to disable this feature so that Redis will
+# continue to work as usual even if there are problems with disk,
+# permissions, and so forth.
+stop-writes-on-bgsave-error yes
+
+# Compress string objects using LZF when dump .rdb databases?
+# For default that's set to 'yes' as it's almost always a win.
+# If you want to save some CPU in the saving child set it to 'no' but
+# the dataset will likely be bigger if you have compressible values or keys.
+rdbcompression yes
+
+# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
+# This makes the format more resistant to corruption but there is a performance
+# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
+# for maximum performances.
+#
+# RDB files created with checksum disabled have a checksum of zero that will
+# tell the loading code to skip the check.
+rdbchecksum yes
+
+# The filename where to dump the DB
+dbfilename dump.rdb
+
+# The working directory.
+#
+# The DB will be written inside this directory, with the filename specified
+# above using the 'dbfilename' configuration directive.
+#
+# The Append Only File will also be created inside this directory.
+#
+# Note that you must specify a directory here, not a file name.
+dir /var/lib/redis
+
+################################# REPLICATION #################################
+
+# Master-Slave replication. Use slaveof to make a Redis instance a copy of
+# another Redis server. A few things to understand ASAP about Redis replication.
+#
+# 1) Redis replication is asynchronous, but you can configure a master to
+# stop accepting writes if it appears to be not connected with at least
+# a given number of slaves.
+# 2) Redis slaves are able to perform a partial resynchronization with the
+# master if the replication link is lost for a relatively small amount of
+# time. You may want to configure the replication backlog size (see the next
+# sections of this file) with a sensible value depending on your needs.
+# 3) Replication is automatic and does not need user intervention. After a
+# network partition slaves automatically try to reconnect to masters
+# and resynchronize with them.
+#
+# slaveof
+
+# If the master is password protected (using the "requirepass" configuration
+# directive below) it is possible to tell the slave to authenticate before
+# starting the replication synchronization process, otherwise the master will
+# refuse the slave request.
+#
+# masterauth
+
+# When a slave loses its connection with the master, or when the replication
+# is still in progress, the slave can act in two different ways:
+#
+# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
+# still reply to client requests, possibly with out of date data, or the
+# data set may just be empty if this is the first synchronization.
+#
+# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
+# an error "SYNC with master in progress" to all the kind of commands
+# but to INFO and SLAVEOF.
+#
+slave-serve-stale-data yes
+
+# You can configure a slave instance to accept writes or not. Writing against
+# a slave instance may be useful to store some ephemeral data (because data
+# written on a slave will be easily deleted after resync with the master) but
+# may also cause problems if clients are writing to it because of a
+# misconfiguration.
+#
+# Since Redis 2.6 by default slaves are read-only.
+#
+# Note: read only slaves are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only slave exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+# security of read only slaves using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+slave-read-only yes
+
+# Replication SYNC strategy: disk or socket.
+#
+# -------------------------------------------------------
+# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
+# -------------------------------------------------------
+#
+# New slaves and reconnecting slaves that are not able to continue the replication
+# process just receiving differences, need to do what is called a "full
+# synchronization". An RDB file is transmitted from the master to the slaves.
+# The transmission can happen in two different ways:
+#
+# 1) Disk-backed: The Redis master creates a new process that writes the RDB
+# file on disk. Later the file is transferred by the parent
+# process to the slaves incrementally.
+# 2) Diskless: The Redis master creates a new process that directly writes the
+# RDB file to slave sockets, without touching the disk at all.
+#
+# With disk-backed replication, while the RDB file is generated, more slaves
+# can be queued and served with the RDB file as soon as the current child producing
+# the RDB file finishes its work. With diskless replication instead once
+# the transfer starts, new slaves arriving will be queued and a new transfer
+# will start when the current one terminates.
+#
+# When diskless replication is used, the master waits a configurable amount of
+# time (in seconds) before starting the transfer in the hope that multiple slaves
+# will arrive and the transfer can be parallelized.
+#
+# With slow disks and fast (large bandwidth) networks, diskless replication
+# works better.
+repl-diskless-sync no
+
+# When diskless replication is enabled, it is possible to configure the delay
+# the server waits in order to spawn the child that transfers the RDB via socket
+# to the slaves.
+#
+# This is important since once the transfer starts, it is not possible to serve
+# new slaves arriving, that will be queued for the next RDB transfer, so the server
+# waits a delay in order to let more slaves arrive.
+#
+# The delay is specified in seconds, and by default is 5 seconds. To disable
+# it entirely just set it to 0 seconds and the transfer will start ASAP.
+repl-diskless-sync-delay 5
+
+# Slaves send PINGs to server in a predefined interval. It's possible to change
+# this interval with the repl_ping_slave_period option. The default value is 10
+# seconds.
+#
+# repl-ping-slave-period 10
+
+# The following option sets the replication timeout for:
+#
+# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
+# 2) Master timeout from the point of view of slaves (data, pings).
+# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
+#
+# It is important to make sure that this value is greater than the value
+# specified for repl-ping-slave-period otherwise a timeout will be detected
+# every time there is low traffic between the master and the slave.
+#
+# repl-timeout 60
+
+# Disable TCP_NODELAY on the slave socket after SYNC?
+#
+# If you select "yes" Redis will use a smaller number of TCP packets and
+# less bandwidth to send data to slaves. But this can add a delay for
+# the data to appear on the slave side, up to 40 milliseconds with
+# Linux kernels using a default configuration.
+#
+# If you select "no" the delay for data to appear on the slave side will
+# be reduced but more bandwidth will be used for replication.
+#
+# By default we optimize for low latency, but in very high traffic conditions
+# or when the master and slaves are many hops away, turning this to "yes" may
+# be a good idea.
+repl-disable-tcp-nodelay no
+
+# Set the replication backlog size. The backlog is a buffer that accumulates
+# slave data when slaves are disconnected for some time, so that when a slave
+# wants to reconnect again, often a full resync is not needed, but a partial
+# resync is enough, just passing the portion of data the slave missed while
+# disconnected.
+#
+# The bigger the replication backlog, the longer the time the slave can be
+# disconnected and later be able to perform a partial resynchronization.
+#
+# The backlog is only allocated once there is at least a slave connected.
+#
+# repl-backlog-size 1mb
+
+# After a master has no longer connected slaves for some time, the backlog
+# will be freed. The following option configures the amount of seconds that
+# need to elapse, starting from the time the last slave disconnected, for
+# the backlog buffer to be freed.
+#
+# Note that slaves never free the backlog for timeout, since they may be
+# promoted to masters later, and should be able to correctly "partially
+# resynchronize" with the slaves: hence they should always accumulate backlog.
+#
+# A value of 0 means to never release the backlog.
+#
+# repl-backlog-ttl 3600
+
+# The slave priority is an integer number published by Redis in the INFO output.
+# It is used by Redis Sentinel in order to select a slave to promote into a
+# master if the master is no longer working correctly.
+#
+# A slave with a low priority number is considered better for promotion, so
+# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
+# pick the one with priority 10, that is the lowest.
+#
+# However a special priority of 0 marks the slave as not able to perform the
+# role of master, so a slave with priority of 0 will never be selected by
+# Redis Sentinel for promotion.
+#
+# By default the priority is 100.
+slave-priority 100
+
+# It is possible for a master to stop accepting writes if there are less than
+# N slaves connected, having a lag less or equal than M seconds.
+#
+# The N slaves need to be in "online" state.
+#
+# The lag in seconds, that must be <= the specified value, is calculated from
+# the last ping received from the slave, that is usually sent every second.
+#
+# This option does not GUARANTEE that N replicas will accept the write, but
+# will limit the window of exposure for lost writes in case not enough slaves
+# are available, to the specified number of seconds.
+#
+# For example to require at least 3 slaves with a lag <= 10 seconds use:
+#
+# min-slaves-to-write 3
+# min-slaves-max-lag 10
+#
+# Setting one or the other to 0 disables the feature.
+#
+# By default min-slaves-to-write is set to 0 (feature disabled) and
+# min-slaves-max-lag is set to 10.
+
+# A Redis master is able to list the address and port of the attached
+# slaves in different ways. For example the "INFO replication" section
+# offers this information, which is used, among other tools, by
+# Redis Sentinel in order to discover slave instances.
+# Another place where this info is available is in the output of the
+# "ROLE" command of a master.
+#
+# The listed IP and address normally reported by a slave is obtained
+# in the following way:
+#
+# IP: The address is auto detected by checking the peer address
+# of the socket used by the slave to connect with the master.
+#
+# Port: The port is communicated by the slave during the replication
+# handshake, and is normally the port that the slave is using to
+# list for connections.
+#
+# However when port forwarding or Network Address Translation (NAT) is
+# used, the slave may be actually reachable via different IP and port
+# pairs. The following two options can be used by a slave in order to
+# report to its master a specific set of IP and port, so that both INFO
+# and ROLE will report those values.
+#
+# There is no need to use both the options if you need to override just
+# the port or the IP address.
+#
+# slave-announce-ip 5.5.5.5
+# slave-announce-port 1234
+
+################################## SECURITY ###################################
+
+# Require clients to issue AUTH before processing any other
+# commands. This might be useful in environments in which you do not trust
+# others with access to the host running redis-server.
+#
+# This should stay commented out for backward compatibility and because most
+# people do not need auth (e.g. they run their own servers).
+#
+# Warning: since Redis is pretty fast an outside user can try up to
+# 150k passwords per second against a good box. This means that you should
+# use a very strong password otherwise it will be very easy to break.
+#
+# requirepass foobared
+
+# Command renaming.
+#
+# It is possible to change the name of dangerous commands in a shared
+# environment. For instance the CONFIG command may be renamed into something
+# hard to guess so that it will still be available for internal-use tools
+# but not available for general clients.
+#
+# Example:
+#
+# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
+#
+# It is also possible to completely kill a command by renaming it into
+# an empty string:
+#
+# rename-command CONFIG ""
+#
+# Please note that changing the name of commands that are logged into the
+# AOF file or transmitted to slaves may cause problems.
+
+################################### CLIENTS ####################################
+
+# Set the max number of connected clients at the same time. By default
+# this limit is set to 10000 clients, however if the Redis server is not
+# able to configure the process file limit to allow for the specified limit
+# the max number of allowed clients is set to the current file limit
+# minus 32 (as Redis reserves a few file descriptors for internal uses).
+#
+# Once the limit is reached Redis will close all the new connections sending
+# an error 'max number of clients reached'.
+#
+# maxclients 10000
+
+############################## MEMORY MANAGEMENT ################################
+
+# Set a memory usage limit to the specified amount of bytes.
+# When the memory limit is reached Redis will try to remove keys
+# according to the eviction policy selected (see maxmemory-policy).
+#
+# If Redis can't remove keys according to the policy, or if the policy is
+# set to 'noeviction', Redis will start to reply with errors to commands
+# that would use more memory, like SET, LPUSH, and so on, and will continue
+# to reply to read-only commands like GET.
+#
+# This option is usually useful when using Redis as an LRU or LFU cache, or to
+# set a hard memory limit for an instance (using the 'noeviction' policy).
+#
+# WARNING: If you have slaves attached to an instance with maxmemory on,
+# the size of the output buffers needed to feed the slaves are subtracted
+# from the used memory count, so that network problems / resyncs will
+# not trigger a loop where keys are evicted, and in turn the output
+# buffer of slaves is full with DELs of keys evicted triggering the deletion
+# of more keys, and so forth until the database is completely emptied.
+#
+# In short... if you have slaves attached it is suggested that you set a lower
+# limit for maxmemory so that there is some free RAM on the system for slave
+# output buffers (but this is not needed if the policy is 'noeviction').
+#
+# maxmemory
+
+# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
+# is reached. You can select among five behaviors:
+#
+# volatile-lru -> Evict using approximated LRU among the keys with an expire set.
+# allkeys-lru -> Evict any key using approximated LRU.
+# volatile-lfu -> Evict using approximated LFU among the keys with an expire set.
+# allkeys-lfu -> Evict any key using approximated LFU.
+# volatile-random -> Remove a random key among the ones with an expire set.
+# allkeys-random -> Remove a random key, any key.
+# volatile-ttl -> Remove the key with the nearest expire time (minor TTL)
+# noeviction -> Don't evict anything, just return an error on write operations.
+#
+# LRU means Least Recently Used
+# LFU means Least Frequently Used
+#
+# Both LRU, LFU and volatile-ttl are implemented using approximated
+# randomized algorithms.
+#
+# Note: with any of the above policies, Redis will return an error on write
+# operations, when there are no suitable keys for eviction.
+#
+# At the date of writing these commands are: set setnx setex append
+# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
+# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
+# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
+# getset mset msetnx exec sort
+#
+# The default is:
+#
+# maxmemory-policy noeviction
+
+# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
+# algorithms (in order to save memory), so you can tune it for speed or
+# accuracy. For default Redis will check five keys and pick the one that was
+# used less recently, you can change the sample size using the following
+# configuration directive.
+#
+# The default of 5 produces good enough results. 10 Approximates very closely
+# true LRU but costs more CPU. 3 is faster but not very accurate.
+#
+# maxmemory-samples 5
+
+############################# LAZY FREEING ####################################
+
+# Redis has two primitives to delete keys. One is called DEL and is a blocking
+# deletion of the object. It means that the server stops processing new commands
+# in order to reclaim all the memory associated with an object in a synchronous
+# way. If the key deleted is associated with a small object, the time needed
+# in order to execute the DEL command is very small and comparable to most other
+# O(1) or O(log_N) commands in Redis. However if the key is associated with an
+# aggregated value containing millions of elements, the server can block for
+# a long time (even seconds) in order to complete the operation.
+#
+# For the above reasons Redis also offers non blocking deletion primitives
+# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and
+# FLUSHDB commands, in order to reclaim memory in background. Those commands
+# are executed in constant time. Another thread will incrementally free the
+# object in the background as fast as possible.
+#
+# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled.
+# It's up to the design of the application to understand when it is a good
+# idea to use one or the other. However the Redis server sometimes has to
+# delete keys or flush the whole database as a side effect of other operations.
+# Specifically Redis deletes objects independently of a user call in the
+# following scenarios:
+#
+# 1) On eviction, because of the maxmemory and maxmemory policy configurations,
+# in order to make room for new data, without going over the specified
+# memory limit.
+# 2) Because of expire: when a key with an associated time to live (see the
+# EXPIRE command) must be deleted from memory.
+# 3) Because of a side effect of a command that stores data on a key that may
+# already exist. For example the RENAME command may delete the old key
+# content when it is replaced with another one. Similarly SUNIONSTORE
+# or SORT with STORE option may delete existing keys. The SET command
+# itself removes any old content of the specified key in order to replace
+# it with the specified string.
+# 4) During replication, when a slave performs a full resynchronization with
+# its master, the content of the whole database is removed in order to
+# load the RDB file just transfered.
+#
+# In all the above cases the default is to delete objects in a blocking way,
+# like if DEL was called. However you can configure each case specifically
+# in order to instead release memory in a non-blocking way like if UNLINK
+# was called, using the following configuration directives:
+
+lazyfree-lazy-eviction no
+lazyfree-lazy-expire no
+lazyfree-lazy-server-del no
+slave-lazy-flush no
+
+############################## APPEND ONLY MODE ###############################
+
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result into a few minutes of writes lost (depending on
+# the configured save points).
+#
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# wrong with the Redis process itself happens, but the operating system is
+# still running correctly.
+#
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled on startup Redis will load the AOF, that is the file
+# with the better durability guarantees.
+#
+# Please check http://redis.io/topics/persistence for more information.
+
+appendonly no
+
+# The name of the append only file (default: "appendonly.aof")
+
+appendfilename "appendonly.aof"
+
+# The fsync() call tells the Operating System to actually write data on disk
+# instead of waiting for more data in the output buffer. Some OS will really flush
+# data on disk, some other OS will just try to do it ASAP.
+#
+# Redis supports three different modes:
+#
+# no: don't fsync, just let the OS flush the data when it wants. Faster.
+# always: fsync after every write to the append only log. Slow, Safest.
+# everysec: fsync only one time every second. Compromise.
+#
+# The default is "everysec", as that's usually the right compromise between
+# speed and data safety. It's up to you to understand if you can relax this to
+# "no" that will let the operating system flush the output buffer when
+# it wants, for better performances (but if you can live with the idea of
+# some data loss consider the default persistence mode that's snapshotting),
+# or on the contrary, use "always" that's very slow but a bit safer than
+# everysec.
+#
+# More details please check the following article:
+# http://antirez.com/post/redis-persistence-demystified.html
+#
+# If unsure, use "everysec".
+
+# appendfsync always
+appendfsync everysec
+# appendfsync no
+
+# When the AOF fsync policy is set to always or everysec, and a background
+# saving process (a background save or AOF log background rewriting) is
+# performing a lot of I/O against the disk, in some Linux configurations
+# Redis may block too long on the fsync() call. Note that there is no fix for
+# this currently, as even performing fsync in a different thread will block
+# our synchronous write(2) call.
+#
+# In order to mitigate this problem it's possible to use the following option
+# that will prevent fsync() from being called in the main process while a
+# BGSAVE or BGREWRITEAOF is in progress.
+#
+# This means that while another child is saving, the durability of Redis is
+# the same as "appendfsync none". In practical terms, this means that it is
+# possible to lose up to 30 seconds of log in the worst scenario (with the
+# default Linux settings).
+#
+# If you have latency problems turn this to "yes". Otherwise leave it as
+# "no" that is the safest pick from the point of view of durability.
+
+no-appendfsync-on-rewrite no
+
+# Automatic rewrite of the append only file.
+# Redis is able to automatically rewrite the log file implicitly calling
+# BGREWRITEAOF when the AOF log size grows by the specified percentage.
+#
+# This is how it works: Redis remembers the size of the AOF file after the
+# latest rewrite (if no rewrite has happened since the restart, the size of
+# the AOF at startup is used).
+#
+# This base size is compared to the current size. If the current size is
+# bigger than the specified percentage, the rewrite is triggered. Also
+# you need to specify a minimal size for the AOF file to be rewritten, this
+# is useful to avoid rewriting the AOF file even if the percentage increase
+# is reached but it is still pretty small.
+#
+# Specify a percentage of zero in order to disable the automatic AOF
+# rewrite feature.
+
+auto-aof-rewrite-percentage 100
+auto-aof-rewrite-min-size 64mb
+
+# An AOF file may be found to be truncated at the end during the Redis
+# startup process, when the AOF data gets loaded back into memory.
+# This may happen when the system where Redis is running
+# crashes, especially when an ext4 filesystem is mounted without the
+# data=ordered option (however this can't happen when Redis itself
+# crashes or aborts but the operating system still works correctly).
+#
+# Redis can either exit with an error when this happens, or load as much
+# data as possible (the default now) and start if the AOF file is found
+# to be truncated at the end. The following option controls this behavior.
+#
+# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
+# the Redis server starts emitting a log to inform the user of the event.
+# Otherwise if the option is set to no, the server aborts with an error
+# and refuses to start. When the option is set to no, the user requires
+# to fix the AOF file using the "redis-check-aof" utility before to restart
+# the server.
+#
+# Note that if the AOF file will be found to be corrupted in the middle
+# the server will still exit with an error. This option only applies when
+# Redis will try to read more data from the AOF file but not enough bytes
+# will be found.
+aof-load-truncated yes
+
+# When rewriting the AOF file, Redis is able to use an RDB preamble in the
+# AOF file for faster rewrites and recoveries. When this option is turned
+# on the rewritten AOF file is composed of two different stanzas:
+#
+# [RDB file][AOF tail]
+#
+# When loading Redis recognizes that the AOF file starts with the "REDIS"
+# string and loads the prefixed RDB file, and continues loading the AOF
+# tail.
+#
+# This is currently turned off by default in order to avoid the surprise
+# of a format change, but will at some point be used as the default.
+aof-use-rdb-preamble no
+
+################################ LUA SCRIPTING ###############################
+
+# Max execution time of a Lua script in milliseconds.
+#
+# If the maximum execution time is reached Redis will log that a script is
+# still in execution after the maximum allowed time and will start to
+# reply to queries with an error.
+#
+# When a long running script exceeds the maximum execution time only the
+# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
+# used to stop a script that did not yet called write commands. The second
+# is the only way to shut down the server in the case a write command was
+# already issued by the script but the user doesn't want to wait for the natural
+# termination of the script.
+#
+# Set it to 0 or a negative value for unlimited execution without warnings.
+lua-time-limit 5000
+
+################################ REDIS CLUSTER ###############################
+#
+# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however
+# in order to mark it as "mature" we need to wait for a non trivial percentage
+# of users to deploy it in production.
+# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+#
+# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
+# started as cluster nodes can. In order to start a Redis instance as a
+# cluster node enable the cluster support uncommenting the following:
+#
+# cluster-enabled yes
+
+# Every cluster node has a cluster configuration file. This file is not
+# intended to be edited by hand. It is created and updated by Redis nodes.
+# Every Redis Cluster node requires a different cluster configuration file.
+# Make sure that instances running in the same system do not have
+# overlapping cluster configuration file names.
+#
+# cluster-config-file nodes-6379.conf
+
+# Cluster node timeout is the amount of milliseconds a node must be unreachable
+# for it to be considered in failure state.
+# Most other internal time limits are multiple of the node timeout.
+#
+# cluster-node-timeout 15000
+
+# A slave of a failing master will avoid to start a failover if its data
+# looks too old.
+#
+# There is no simple way for a slave to actually have an exact measure of
+# its "data age", so the following two checks are performed:
+#
+# 1) If there are multiple slaves able to failover, they exchange messages
+# in order to try to give an advantage to the slave with the best
+# replication offset (more data from the master processed).
+# Slaves will try to get their rank by offset, and apply to the start
+# of the failover a delay proportional to their rank.
+#
+# 2) Every single slave computes the time of the last interaction with
+# its master. This can be the last ping or command received (if the master
+# is still in the "connected" state), or the time that elapsed since the
+# disconnection with the master (if the replication link is currently down).
+# If the last interaction is too old, the slave will not try to failover
+# at all.
+#
+# The point "2" can be tuned by user. Specifically a slave will not perform
+# the failover if, since the last interaction with the master, the time
+# elapsed is greater than:
+#
+# (node-timeout * slave-validity-factor) + repl-ping-slave-period
+#
+# So for example if node-timeout is 30 seconds, and the slave-validity-factor
+# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the
+# slave will not try to failover if it was not able to talk with the master
+# for longer than 310 seconds.
+#
+# A large slave-validity-factor may allow slaves with too old data to failover
+# a master, while a too small value may prevent the cluster from being able to
+# elect a slave at all.
+#
+# For maximum availability, it is possible to set the slave-validity-factor
+# to a value of 0, which means, that slaves will always try to failover the
+# master regardless of the last time they interacted with the master.
+# (However they'll always try to apply a delay proportional to their
+# offset rank).
+#
+# Zero is the only value able to guarantee that when all the partitions heal
+# the cluster will always be able to continue.
+#
+# cluster-slave-validity-factor 10
+
+# Cluster slaves are able to migrate to orphaned masters, that are masters
+# that are left without working slaves. This improves the cluster ability
+# to resist to failures as otherwise an orphaned master can't be failed over
+# in case of failure if it has no working slaves.
+#
+# Slaves migrate to orphaned masters only if there are still at least a
+# given number of other working slaves for their old master. This number
+# is the "migration barrier". A migration barrier of 1 means that a slave
+# will migrate only if there is at least 1 other working slave for its master
+# and so forth. It usually reflects the number of slaves you want for every
+# master in your cluster.
+#
+# Default is 1 (slaves migrate only if their masters remain with at least
+# one slave). To disable migration just set it to a very large value.
+# A value of 0 can be set but is useful only for debugging and dangerous
+# in production.
+#
+# cluster-migration-barrier 1
+
+# By default Redis Cluster nodes stop accepting queries if they detect there
+# is at least an hash slot uncovered (no available node is serving it).
+# This way if the cluster is partially down (for example a range of hash slots
+# are no longer covered) all the cluster becomes, eventually, unavailable.
+# It automatically returns available as soon as all the slots are covered again.
+#
+# However sometimes you want the subset of the cluster which is working,
+# to continue to accept queries for the part of the key space that is still
+# covered. In order to do so, just set the cluster-require-full-coverage
+# option to no.
+#
+# cluster-require-full-coverage yes
+
+# This option, when set to yes, prevents slaves from trying to failover its
+# master during master failures. However the master can still perform a
+# manual failover, if forced to do so.
+#
+# This is useful in different scenarios, especially in the case of multiple
+# data center operations, where we want one side to never be promoted if not
+# in the case of a total DC failure.
+#
+# cluster-slave-no-failover no
+
+# In order to setup your cluster make sure to read the documentation
+# available at http://redis.io web site.
+
+########################## CLUSTER DOCKER/NAT support ########################
+
+# In certain deployments, Redis Cluster nodes address discovery fails, because
+# addresses are NAT-ted or because ports are forwarded (the typical case is
+# Docker and other containers).
+#
+# In order to make Redis Cluster working in such environments, a static
+# configuration where each node knows its public address is needed. The
+# following two options are used for this scope, and are:
+#
+# * cluster-announce-ip
+# * cluster-announce-port
+# * cluster-announce-bus-port
+#
+# Each instruct the node about its address, client port, and cluster message
+# bus port. The information is then published in the header of the bus packets
+# so that other nodes will be able to correctly map the address of the node
+# publishing the information.
+#
+# If the above options are not used, the normal Redis Cluster auto-detection
+# will be used instead.
+#
+# Note that when remapped, the bus port may not be at the fixed offset of
+# clients port + 10000, so you can specify any port and bus-port depending
+# on how they get remapped. If the bus-port is not set, a fixed offset of
+# 10000 will be used as usually.
+#
+# Example:
+#
+# cluster-announce-ip 10.1.1.5
+# cluster-announce-port 6379
+# cluster-announce-bus-port 6380
+
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and can not serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis
+# what is the execution time, in microseconds, to exceed in order for the
+# command to get logged, and the other parameter is the length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128
+
+################################ LATENCY MONITOR ##############################
+
+# The Redis latency monitoring subsystem samples different operations
+# at runtime in order to collect data related to possible sources of
+# latency of a Redis instance.
+#
+# Via the LATENCY command this information is available to the user that can
+# print graphs and obtain reports.
+#
+# The system only logs operations that were performed in a time equal or
+# greater than the amount of milliseconds specified via the
+# latency-monitor-threshold configuration directive. When its value is set
+# to zero, the latency monitor is turned off.
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact, that while very small, can be measured under big load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold " if needed.
+latency-monitor-threshold 0
+
+############################# EVENT NOTIFICATION ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at http://redis.io/topics/notifications
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in the Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+# K Keyspace events, published with __keyspace@__ prefix.
+# E Keyevent events, published with __keyevent@__ prefix.
+# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+# $ String commands
+# l List commands
+# s Set commands
+# h Hash commands
+# z Sorted set commands
+# x Expired events (events generated every time a key expires)
+# e Evicted events (events generated when a key is evicted for maxmemory)
+# A Alias for g$lshzxe, so that the "AKE" string means all the events.
+#
+# The "notify-keyspace-events" takes as argument a string that is composed
+# of zero or multiple characters. The empty string means that notifications
+# are disabled.
+#
+# Example: to enable list and generic events, from the point of view of the
+# event name, use:
+#
+# notify-keyspace-events Elg
+#
+# Example 2: to get the stream of the expired keys subscribing to channel
+# name __keyevent@0__:expired use:
+#
+# notify-keyspace-events Ex
+#
+# By default all notifications are disabled because most users don't need
+# this feature and the feature has some overhead. Note that if you don't
+# specify at least one of K or E, no events will be delivered.
+notify-keyspace-events ""
+
+############################### ADVANCED CONFIG ###############################
+
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-ziplist-entries 512
+hash-max-ziplist-value 64
+
+# Lists are also encoded in a special way to save a lot of space.
+# The number of entries allowed per internal list node can be specified
+# as a fixed maximum size or a maximum number of elements.
+# For a fixed maximum size, use -5 through -1, meaning:
+# -5: max size: 64 Kb <-- not recommended for normal workloads
+# -4: max size: 32 Kb <-- not recommended
+# -3: max size: 16 Kb <-- probably not recommended
+# -2: max size: 8 Kb <-- good
+# -1: max size: 4 Kb <-- good
+# Positive numbers mean store up to _exactly_ that number of elements
+# per list node.
+# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
+# but if your use case is unique, adjust the settings as necessary.
+list-max-ziplist-size -2
+
+# Lists may also be compressed.
+# Compress depth is the number of quicklist ziplist nodes from *each* side of
+# the list to *exclude* from compression. The head and tail of the list
+# are always uncompressed for fast push/pop operations. Settings are:
+# 0: disable all list compression
+# 1: depth 1 means "don't start compressing until after 1 node into the list,
+# going from either the head or tail"
+# So: [head]->node->node->...->node->[tail]
+# [head], [tail] will always be uncompressed; inner nodes will compress.
+# 2: [head]->[next]->node->node->...->node->[prev]->[tail]
+# 2 here means: don't compress head or head->next or tail->prev or tail,
+# but compress all nodes between them.
+# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
+# etc.
+list-compress-depth 0
+
+# Sets have a special encoding in just one case: when a set is composed
+# of just strings that happen to be integers in radix 10 in the range
+# of 64 bit signed integers.
+# The following configuration setting sets the limit in the size of the
+# set in order to use this special memory saving encoding.
+set-max-intset-entries 512
+
+# Similarly to hashes and lists, sorted sets are also specially encoded in
+# order to save a lot of space. This encoding is only used when the length and
+# elements of a sorted set are below the following limits:
+zset-max-ziplist-entries 128
+zset-max-ziplist-value 64
+
+# HyperLogLog sparse representation bytes limit. The limit includes the
+# 16 bytes header. When an HyperLogLog using the sparse representation crosses
+# this limit, it is converted into the dense representation.
+#
+# A value greater than 16000 is totally useless, since at that point the
+# dense representation is more memory efficient.
+#
+# The suggested value is ~ 3000 in order to have the benefits of
+# the space efficient encoding without slowing down too much PFADD,
+# which is O(N) with the sparse encoding. The value can be raised to
+# ~ 10000 when CPU is not a concern, but space is, and the data set is
+# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
+hll-sparse-max-bytes 3000
+
+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehashing the main Redis hash table (the one mapping top-level
+# keys to values). The hash table implementation Redis uses (see dict.c)
+# performs a lazy rehashing: the more operation you run into a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never complete and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with 2 milliseconds delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# slave -> slave clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reach 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously overcomes
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can read.
+#
+# Instead there is a default limit for pubsub and slave clients, since
+# subscribers and slaves receive data in a push fashion.
+#
+# Both the hard or the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit slave 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
+
+# Client query buffers accumulate new commands. They are limited to a fixed
+# amount by default in order to avoid that a protocol desynchronization (for
+# instance due to a bug in the client) will lead to unbound memory usage in
+# the query buffer. However you can configure it here if you have very special
+# needs, such us huge multi/exec requests or alike.
+#
+# client-query-buffer-limit 1gb
+
+# In the Redis protocol, bulk requests, that are, elements representing single
+# strings, are normally limited ot 512 mb. However you can change this limit
+# here.
+#
+# proto-max-bulk-len 512mb
+
+# Redis calls an internal function to perform many background tasks, like
+# closing connections of clients in timeout, purging expired keys that are
+# never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform according to the specified "hz" value.
+#
+# By default "hz" is set to 10. Raising the value will use more CPU when
+# Redis is idle, but at the same time will make Redis more responsive when
+# there are many keys expiring at the same time, and timeouts may be
+# handled with more precision.
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz 10
+
+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes
+
+# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
+# idea to start with the default settings and only change them after investigating
+# how to improve the performances and how the keys LFU change over time, which
+# is possible to inspect via the OBJECT FREQ command.
+#
+# There are two tunable parameters in the Redis LFU implementation: the
+# counter logarithm factor and the counter decay time. It is important to
+# understand what the two parameters mean before changing them.
+#
+# The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis
+# uses a probabilistic increment with logarithmic behavior. Given the value
+# of the old counter, when a key is accessed, the counter is incremented in
+# this way:
+#
+# 1. A random number R between 0 and 1 is extracted.
+# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
+# 3. The counter is incremented only if R < P.
+#
+# The default lfu-log-factor is 10. This is a table of how the frequency
+# counter changes with a different number of accesses with different
+# logarithmic factors:
+#
+# +--------+------------+------------+------------+------------+------------+
+# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits |
+# +--------+------------+------------+------------+------------+------------+
+# | 0 | 104 | 255 | 255 | 255 | 255 |
+# +--------+------------+------------+------------+------------+------------+
+# | 1 | 18 | 49 | 255 | 255 | 255 |
+# +--------+------------+------------+------------+------------+------------+
+# | 10 | 10 | 18 | 142 | 255 | 255 |
+# +--------+------------+------------+------------+------------+------------+
+# | 100 | 8 | 11 | 49 | 143 | 255 |
+# +--------+------------+------------+------------+------------+------------+
+#
+# NOTE: The above table was obtained by running the following commands:
+#
+# redis-benchmark -n 1000000 incr foo
+# redis-cli object freq foo
+#
+# NOTE 2: The counter initial value is 5 in order to give new objects a chance
+# to accumulate hits.
+#
+# The counter decay time is the time, in minutes, that must elapse in order
+# for the key counter to be divided by two (or decremented if it has a value
+# less <= 10).
+#
+# The default value for the lfu-decay-time is 1. A Special value of 0 means to
+# decay the counter every time it happens to be scanned.
+#
+# lfu-log-factor 10
+# lfu-decay-time 1
+
+########################### ACTIVE DEFRAGMENTATION #######################
+#
+# WARNING THIS FEATURE IS EXPERIMENTAL. However it was stress tested
+# even in production and manually tested by multiple engineers for some
+# time.
+#
+# What is active defragmentation?
+# -------------------------------
+#
+# Active (online) defragmentation allows a Redis server to compact the
+# spaces left between small allocations and deallocations of data in memory,
+# thus allowing to reclaim back memory.
+#
+# Fragmentation is a natural process that happens with every allocator (but
+# less so with Jemalloc, fortunately) and certain workloads. Normally a server
+# restart is needed in order to lower the fragmentation, or at least to flush
+# away all the data and create it again. However thanks to this feature
+# implemented by Oran Agra for Redis 4.0 this process can happen at runtime
+# in an "hot" way, while the server is running.
+#
+# Basically when the fragmentation is over a certain level (see the
+# configuration options below) Redis will start to create new copies of the
+# values in contiguous memory regions by exploiting certain specific Jemalloc
+# features (in order to understand if an allocation is causing fragmentation
+# and to allocate it in a better place), and at the same time, will release the
+# old copies of the data. This process, repeated incrementally for all the keys
+# will cause the fragmentation to drop back to normal values.
+#
+# Important things to understand:
+#
+# 1. This feature is disabled by default, and only works if you compiled Redis
+# to use the copy of Jemalloc we ship with the source code of Redis.
+# This is the default with Linux builds.
+#
+# 2. You never need to enable this feature if you don't have fragmentation
+# issues.
+#
+# 3. Once you experience fragmentation, you can enable this feature when
+# needed with the command "CONFIG SET activedefrag yes".
+#
+# The configuration parameters are able to fine tune the behavior of the
+# defragmentation process. If you are not sure about what they mean it is
+# a good idea to leave the defaults untouched.
+
+# Enabled active defragmentation
+# activedefrag yes
+
+# Minimum amount of fragmentation waste to start active defrag
+# active-defrag-ignore-bytes 100mb
+
+# Minimum percentage of fragmentation to start active defrag
+# active-defrag-threshold-lower 10
+
+# Maximum percentage of fragmentation at which we use maximum effort
+# active-defrag-threshold-upper 100
+
+# Minimal effort for defrag in CPU percentage
+# active-defrag-cycle-min 25
+
+# Maximal effort for defrag in CPU percentage
+# active-defrag-cycle-max 75
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 1c279dd..632bc0b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,48 +1,68 @@
-asgiref==3.2.7
-Babel==2.8.0
-certifi==2020.4.5.1
-chardet==3.0.4
-debtcollector==2.0.1
-Django==3.0.5
-django-cors-headers==3.2.1
-django-extensions==2.2.9
-django-filter==2.2.0
-django-model-utils==4.0.0
-django-redis==4.11.0
-django-sendgrid-v5==0.8.1
-djangorestframework==3.11.0
-future==0.18.2
-gunicorn==20.0.4
-idna==2.9
-iso8601==0.1.12
-keystoneauth1==4.0.0
-msgpack==1.0.0
-mysqlclient==1.4.6
-netaddr==0.7.19
-netifaces==0.10.9
-os-service-types==1.7.0
-oslo.config==8.0.2
-oslo.i18n==4.0.1
-oslo.serialization==3.1.1
-oslo.utils==4.1.1
-pbr==5.4.5
-Pillow==7.1.1
-psycopg2-binary==2.8.5
-pyparsing==2.4.7
-python-dotenv==0.13.0
-python-http-client==3.2.7
-python-keystoneclient==4.0.0
-python-magic==0.4.15
-python-swiftclient==3.9.0
-pytz==2019.3
-PyYAML==5.3.1
-redis==3.4.1
-requests==2.23.0
-rfc3986==1.4.0
-sendgrid==6.2.2
-six==1.14.0
-sqlparse==0.3.1
-stevedore==1.32.0
-uritemplate==3.0.1
-urllib3==1.25.9
-wrapt==1.12.1
+asgiref==3.11.0
+babel==2.17.0
+boto3==1.42.2
+boto3-stubs==1.42.2
+boto3-stubs-full==1.42.2
+botocore==1.42.2
+botocore-stubs==1.42.2
+certifi==2025.11.12
+cffi==2.0.0
+chardet==5.2.0
+charset-normalizer==3.4.4
+cryptography==46.0.3
+debtcollector==3.0.0
+Django==6.0
+django-cors-headers==4.9.0
+django-extensions==4.1
+django-filter==25.2
+django-model-utils==5.0.0
+django-redis==6.0.0
+django-sendgrid-v5==1.3.0
+djangorestframework==3.16.1
+future==1.0.0
+gunicorn==23.0.0
+idna==3.11
+iso8601==2.1.0
+jmespath==1.0.1
+keystoneauth1==5.12.0
+MarkupSafe==3.0.3
+msgpack==1.1.2
+mysqlclient==2.2.7
+netaddr==1.3.0
+netifaces==0.11.0
+os-service-types==1.8.2
+oslo.config==10.1.0
+oslo.i18n==6.7.1
+oslo.serialization==5.8.0
+oslo.utils==9.2.0
+packaging==25.0
+pbr==7.0.3
+pillow==12.0.0
+psutil==7.1.3
+pycparser==2.23
+pyparsing==3.2.5
+python-dateutil==2.9.0.post0
+python-dotenv==1.2.1
+python-http-client==3.3.7
+python-keystoneclient==5.7.0
+python-magic==0.4.27
+python-swiftclient==4.9.0
+pytz==2025.2
+PyYAML==6.0.3
+redis==7.1.0
+requests==2.32.3
+rfc3986==2.0.0
+s3transfer==0.16.0
+sendgrid==6.12.5
+setuptools==80.9.0
+six==1.17.0
+sqlparse==0.5.4
+stevedore==5.6.0
+types-awscrt==0.29.2
+types-s3transfer==0.15.0
+typing_extensions==4.15.0
+tzdata==2025.2
+uritemplate==4.2.0
+urllib3==2.5.0
+Werkzeug==3.1.4
+wrapt==2.0.1
diff --git a/start_local_server.sh b/start_local_server.sh
new file mode 100755
index 0000000..af02c56
--- /dev/null
+++ b/start_local_server.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+set -e
+export DOCKER_SCAN_SUGGEST=false
+
+docker compose --env-file ./backend/.env up -d
+docker compose --env-file ./backend/.env exec app /bin/bash
\ No newline at end of file