mirror of
https://github.com/dat515-2025/Group-8.git
synced 2026-03-22 06:57:47 +01:00
update report, edit deployment, update tfvars.example
This commit is contained in:
4
.github/workflows/deploy-pr.yaml
vendored
4
.github/workflows/deploy-pr.yaml
vendored
@@ -33,7 +33,7 @@ jobs:
|
|||||||
runner: vhs
|
runner: vhs
|
||||||
mode: pr
|
mode: pr
|
||||||
pr_number: ${{ github.event.pull_request.number }}
|
pr_number: ${{ github.event.pull_request.number }}
|
||||||
base_domain: ${{ vars.DEV_BASE_DOMAIN }}
|
base_domain: ${{ vars.PROD_DOMAIN }}
|
||||||
secrets: inherit
|
secrets: inherit
|
||||||
|
|
||||||
frontend:
|
frontend:
|
||||||
@@ -77,7 +77,7 @@ jobs:
|
|||||||
|
|
||||||
- name: Helm upgrade/install PR preview
|
- name: Helm upgrade/install PR preview
|
||||||
env:
|
env:
|
||||||
DEV_BASE_DOMAIN: ${{ secrets.BASE_DOMAIN }}
|
DEV_BASE_DOMAIN: ${{ vars.BASE_DOMAIN }}
|
||||||
RABBITMQ_PASSWORD: ${{ secrets.PROD_RABBITMQ_PASSWORD }}
|
RABBITMQ_PASSWORD: ${{ secrets.PROD_RABBITMQ_PASSWORD }}
|
||||||
DB_PASSWORD: ${{ secrets.PROD_DB_PASSWORD }}
|
DB_PASSWORD: ${{ secrets.PROD_DB_PASSWORD }}
|
||||||
DIGEST: ${{ needs.build.outputs.digest }}
|
DIGEST: ${{ needs.build.outputs.digest }}
|
||||||
|
|||||||
@@ -1,5 +1,2 @@
|
|||||||
export const BACKEND_URL: string =
|
export const BACKEND_URL: string =
|
||||||
import.meta.env.VITE_BACKEND_URL ?? '';
|
import.meta.env.VITE_BACKEND_URL ?? 'http://127.0.0.1:8000';
|
||||||
|
|
||||||
export const VITE_UNIRATE_API_KEY: string =
|
|
||||||
import.meta.env.VITE_UNIRATE_API_KEY ?? 'wYXMiA0bz8AVRHtiS9hbKIr4VP3k5Qff8XnQdKQM45YM3IwFWP6y73r3KMkv1590';
|
|
||||||
@@ -20,6 +20,7 @@ through multiple bank accounts. Users can label their transactions with custom c
|
|||||||
filtering and visualization. New transactions are automatically fetched in the background.
|
filtering and visualization. New transactions are automatically fetched in the background.
|
||||||
|
|
||||||
## Architecture Overview
|
## Architecture Overview
|
||||||
|
|
||||||
Our system is a full‑stack web application composed of a React frontend, a FastAPI backend,
|
Our system is a full‑stack web application composed of a React frontend, a FastAPI backend,
|
||||||
a PostgreSQL database, and asynchronous background workers powered by Celery with RabbitMQ.
|
a PostgreSQL database, and asynchronous background workers powered by Celery with RabbitMQ.
|
||||||
Redis is available for caching/kv and may be used by Celery as a result backend. The backend
|
Redis is available for caching/kv and may be used by Celery as a result backend. The backend
|
||||||
@@ -44,14 +45,17 @@ flowchart LR
|
|||||||
```
|
```
|
||||||
|
|
||||||
The workflow works in the following way:
|
The workflow works in the following way:
|
||||||
|
|
||||||
- Client connects to the frontend. After login, frontend automatically fetches the stored transactions from
|
- Client connects to the frontend. After login, frontend automatically fetches the stored transactions from
|
||||||
the database via the backend API and currency rates from UniRate API.
|
the database via the backend API and currency rates from UniRate API.
|
||||||
- When the client opts for fetching new transactions via the Bank API, the backend delegates the task
|
- When the client opts for fetching new transactions via the Bank API, the backend delegates the task
|
||||||
to a background worker service via the Message queue.
|
to a background worker service via the Message queue.
|
||||||
- After successful load, these transactions are stored to the database and displayed to the client
|
- After successful load, these transactions are stored to the database and displayed to the client
|
||||||
- There is also a Task planner, that executes periodic tasks, like fetching new transactions automatically from the Bank APIs
|
- There is also a Task planner, that executes periodic tasks, like fetching new transactions automatically from the Bank
|
||||||
|
APIs
|
||||||
|
|
||||||
### Features
|
### Features
|
||||||
|
|
||||||
- The stored transactions are encrypted in the DB for security reasons.
|
- The stored transactions are encrypted in the DB for security reasons.
|
||||||
- For every pull request the full APP is deployed on a separate URL and the tests are run by github CI/CD
|
- For every pull request the full APP is deployed on a separate URL and the tests are run by github CI/CD
|
||||||
- On every push to main, the production app is automatically updated
|
- On every push to main, the production app is automatically updated
|
||||||
@@ -59,13 +63,17 @@ to a background worker service via the Message queue.
|
|||||||
|
|
||||||
### Components
|
### Components
|
||||||
|
|
||||||
- Frontend (frontend/): React + TypeScript app built with Vite. Talks to the backend via REST, handles login/registration, shows latest transactions, filtering, and allows adding transactions.
|
- Frontend (frontend/): React + TypeScript app built with Vite. Talks to the backend via REST, handles
|
||||||
- Backend API (backend/app): FastAPI app with routers under app/api for auth, users, categories, transactions, exchange rates and bankAPI. Uses FastAPI Users for auth (JWT + OAuth), SQLAlchemy ORM, and Pydantic v2 schemas.
|
login/registration, shows latest transactions, filtering, and allows adding transactions.
|
||||||
- Worker service (backend/app/workers): Celery worker handling asynchronous tasks (e.g., sending verification emails, future background processing).
|
- Backend API (backend/app): FastAPI app with routers under app/api for auth, users, categories, transactions, exchange
|
||||||
|
rates and bankAPI. Uses FastAPI Users for auth (JWT + OAuth), SQLAlchemy ORM, and Pydantic v2 schemas.
|
||||||
|
- Worker service (backend/app/workers): Celery worker handling asynchronous tasks (e.g., sending verification emails,
|
||||||
|
future background processing).
|
||||||
- Database (PostgreSQL): Persists users, categories, transactions; schema managed by Alembic migrations.
|
- Database (PostgreSQL): Persists users, categories, transactions; schema managed by Alembic migrations.
|
||||||
- Message Queue (RabbitMQ): Transports background jobs from the API to the worker.
|
- Message Queue (RabbitMQ): Transports background jobs from the API to the worker.
|
||||||
- Cache/Result Store (Redis): Available for caching or Celery result backend.
|
- Cache/Result Store (Redis): Available for caching or Celery result backend.
|
||||||
- Infrastructure as Code (tofu/): OpenTofu modules provisioning cluster services (RabbitMQ, Redis, Argo CD, cert-manager, Cloudflare tunnel, etc.).
|
- Infrastructure as Code (tofu/): OpenTofu modules provisioning cluster services (RabbitMQ, Redis, Argo CD,
|
||||||
|
cert-manager, Cloudflare tunnel, etc.).
|
||||||
- Deployment Chart (charts/myapp-chart/): Helm chart to deploy the application to Kubernetes.
|
- Deployment Chart (charts/myapp-chart/): Helm chart to deploy the application to Kubernetes.
|
||||||
|
|
||||||
### Technologies Used
|
### Technologies Used
|
||||||
@@ -75,160 +83,340 @@ to a background worker service via the Message queue.
|
|||||||
- Database: MariaDB with Maxscale
|
- Database: MariaDB with Maxscale
|
||||||
- Background jobs: RabbitMQ, Celery
|
- Background jobs: RabbitMQ, Celery
|
||||||
- Containerization/Orchestration: Docker, Docker Compose (dev), Kubernetes, Helm
|
- Containerization/Orchestration: Docker, Docker Compose (dev), Kubernetes, Helm
|
||||||
- IaC/Platform: Proxmox, Talos, Cloudflare pages, OpenTofu (Terraform), cert-manager, MetalLB, Cloudflare Tunnel, Prometheus, Loki
|
- IaC/Platform: Proxmox, Talos, Cloudflare pages, OpenTofu (Terraform), cert-manager, MetalLB, Cloudflare Tunnel,
|
||||||
|
Prometheus, Loki
|
||||||
|
|
||||||
## Prerequisites
|
## Prerequisites
|
||||||
|
|
||||||
### System Requirements
|
### System Requirements
|
||||||
|
|
||||||
- Operating System (dev): Linux, macOS, or Windows with Docker support
|
#### Development
|
||||||
- Operating System (prod): Linux with kubernetes
|
|
||||||
- Minimum RAM: 4 GB (8 GB recommended for running backend, frontend, and database together)
|
- Minimum RAM: 8 GB
|
||||||
- Storage: 4 GB free (Docker images may require additional space)
|
- Storage: 10 GB+ free
|
||||||
|
|
||||||
|
#### Production
|
||||||
|
|
||||||
|
- 1 control plane + 4 worker nodes
|
||||||
|
- CPU: 4 cores
|
||||||
|
- RAM: 8 GB
|
||||||
|
- Storage: 200 GB
|
||||||
|
|
||||||
### Required Software
|
### Required Software
|
||||||
|
|
||||||
- Docker Desktop or Docker Engine
|
#### Development
|
||||||
|
|
||||||
|
- Docker
|
||||||
- Docker Compose
|
- Docker Compose
|
||||||
- Node.js and npm
|
- Node.js and npm
|
||||||
- Python 3.12+
|
- Python 3.12
|
||||||
- MariaDB 11
|
- MariaDB 11
|
||||||
- Helm 3.12+ and kubectl 1.29+
|
|
||||||
|
#### Production
|
||||||
|
|
||||||
|
##### Minimal:
|
||||||
|
|
||||||
|
- domain name with Cloudflare's nameservers - tunnel, pages
|
||||||
|
- Kubernetes cluster
|
||||||
|
- kubectl
|
||||||
|
- Helm
|
||||||
- OpenTofu
|
- OpenTofu
|
||||||
|
|
||||||
### Environment Variables (common)
|
##### Our setup specifics:
|
||||||
|
|
||||||
# TODO: UPDATE
|
- Proxmox VE
|
||||||
- Backend: SECRET, FRONTEND_URL, BACKEND_URL, DATABASE_URL, RABBITMQ_URL, REDIS_URL, UNIRATE_API_KEY
|
- TalosOS cluster
|
||||||
|
- talosctl
|
||||||
|
- GitHub self-hosted runner with access to the cluster
|
||||||
|
- TailScale for remote access to cluster
|
||||||
|
|
||||||
- OAuth vars (Backend): MOJEID_CLIENT_ID/SECRET, BANKID_CLIENT_ID/SECRET (optional)
|
### Environment Variables
|
||||||
- Frontend: VITE_BACKEND_URL
|
|
||||||
|
#### Backend
|
||||||
|
|
||||||
|
- `MOJEID_CLIENT_ID`, `MOJEID_CLIENT_SECRET` \- OAuth client ID and secret for
|
||||||
|
MojeID - https://www.mojeid.cz/en/provider/
|
||||||
|
- `BANKID_CLIENT_ID`, `BANKID_CLIENT_SECRET` \- OAuth client ID and secret for BankID - https://developer.bankid.cz/
|
||||||
|
- `CSAS_CLIENT_ID`, `CSAS_CLIENT_SECRET` \- OAuth client ID and secret for Česká
|
||||||
|
spořitelna - https://developers.erstegroup.com/docs/apis/bank.csas
|
||||||
|
- `DATABASE_URL`(or `MARIADB_HOST`, `MARIADB_PORT`, `MARIADB_DB`, `MARIADB_USER`, `MARIADB_PASSWORD`) \- MariaDB
|
||||||
|
connection details
|
||||||
|
- `RABBITMQ_USERNAME`, `RABBITMQ_PASSWORD` \- credentials for RabbitMQ
|
||||||
|
- `SENTRY_DSN` \- Sentry DSN for error reporting
|
||||||
|
- `DB_ENCRYPTION_KEY` \- symmetric key for encrypting sensitive data in the database
|
||||||
|
- `SMTP_HOST`, `SMTP_PORT`, `SMTP_USERNAME`, `SMTP_PASSWORD`, `SMTP_USE_TLS`, `SMTP_USE_SSL`, `SMTP_FROM` \- SMTP
|
||||||
|
configuration (host, port, auth credentials, TLS/SSL options, sender).
|
||||||
|
- `UNIRATE_API_KEY` \- API key for UniRate.
|
||||||
|
|
||||||
|
#### Frontend
|
||||||
|
|
||||||
|
- `VITE_BACKEND_URL` \- URL of the backend API
|
||||||
|
|
||||||
### Dependencies (key libraries)
|
### Dependencies (key libraries)
|
||||||
Backend: FastAPI, fastapi-users, SQLAlchemy, pydantic v2, Alembic, Celery, uvicorn
|
|
||||||
|
Backend: FastAPI, fastapi-users, SQLAlchemy, pydantic v2, Alembic, Celery, uvicorn, pytest
|
||||||
Frontend: React, TypeScript, Vite
|
Frontend: React, TypeScript, Vite
|
||||||
|
|
||||||
## Local development
|
## Local development
|
||||||
|
|
||||||
You can run the project with Docker Compose and Python virtual environment for testing and dev purposes
|
You can run the project with Docker Compose and Python virtual environment for testing and development purposes
|
||||||
|
|
||||||
### 1) Clone the Repository
|
### 1) Clone the Repository
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
git clone https://github.com/dat515-2025/Group-8.git
|
git clone https://github.com/dat515-2025/Group-8.git
|
||||||
cd 7project
|
cd Group-8/7project
|
||||||
```
|
```
|
||||||
|
|
||||||
### 2) Install dependencies
|
### 2) Install dependencies
|
||||||
|
|
||||||
Backend
|
Backend
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
cd backend
|
||||||
python3 -m venv .venv
|
python3 -m venv .venv
|
||||||
source .venv/bin/activate
|
source .venv/bin/activate
|
||||||
pip install -r requirements.txt
|
pip install -r requirements.txt
|
||||||
```
|
```
|
||||||
Frontend
|
|
||||||
|
### 3) Run Docker containers
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# In 7project/frontend
|
cd ..
|
||||||
npm install
|
docker compose up -d
|
||||||
```
|
```
|
||||||
|
|
||||||
### 3) Manual Local Run
|
### 4) Prepare the database
|
||||||
|
|
||||||
Backend
|
|
||||||
```bash
|
```bash
|
||||||
# From the 7project/ directory
|
|
||||||
docker compose up --build
|
|
||||||
# This starts: MariaDB, RabbitMQ
|
|
||||||
|
|
||||||
# Set environment variables (or create .env file)
|
|
||||||
# TODO: fix
|
|
||||||
export SECRET=CHANGE_ME_SECRET
|
|
||||||
export FRONTEND_DOMAIN_SCHEME=http://localhost:5173
|
|
||||||
export BANKID_CLIENT_ID=CHANGE_ME
|
|
||||||
export BANKID_CLIENT_SECRET=CHANGE_ME
|
|
||||||
export CSAS_CLIENT_ID=CHANGE_ME
|
|
||||||
export CSAS_CLIENT_SECRET=CHANGE_ME
|
|
||||||
export MOJEID_CLIENT_ID=CHANGE_ME
|
|
||||||
export MOJEID_CLIENT_SECRET=CHANGE_ME
|
|
||||||
# Apply DB migrations (Alembic)
|
|
||||||
# From 7project
|
|
||||||
bash upgrade_database.sh
|
bash upgrade_database.sh
|
||||||
|
```
|
||||||
|
|
||||||
# Run API
|
### 5) Run backend
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd backend
|
||||||
|
|
||||||
|
#TODO: set env variables
|
||||||
uvicorn app.app:fastApi --reload --host 0.0.0.0 --port 8000
|
uvicorn app.app:fastApi --reload --host 0.0.0.0 --port 8000
|
||||||
|
```
|
||||||
|
|
||||||
|
### 6) Run Celery worker (optional, in another terminal)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd Group-8/7project/backend
|
||||||
|
source .venv/bin/activate
|
||||||
celery -A app.celery_app.celery_app worker -l info
|
celery -A app.celery_app.celery_app worker -l info
|
||||||
```
|
```
|
||||||
|
|
||||||
Frontend
|
### 7) Install frontend dependencies and run
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Configure backend URL for dev
|
cd ../frontend
|
||||||
echo 'VITE_BACKEND_URL=http://127.0.0.1:8000' > .env
|
npm i
|
||||||
npm run dev
|
npm run dev
|
||||||
# Open http://localhost:5173
|
|
||||||
```
|
```
|
||||||
|
|
||||||
- Backend default: http://127.0.0.1:8000 (OpenAPI at /docs)
|
- Backend available at: http://127.0.0.1:8000 (OpenAPI at /docs)
|
||||||
- Frontend default: http://localhost:5173
|
- Frontend available at: http://localhost:5173
|
||||||
|
|
||||||
## Build Instructions
|
## Build Instructions
|
||||||
|
|
||||||
### Backend
|
### Backend
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# run in project7/backend
|
cd 7project/backend
|
||||||
docker buildx build --platform linux/amd64,linux/arm64 -t your_container_registry/your_name --push .
|
# Don't forget to set correct image tag with your registry and name
|
||||||
|
# For example lukastrkan/cc-app-demo or gitea.ltrk.dev/lukas/cc-app-demo
|
||||||
|
docker buildx build --platform linux/amd64,linux/arm64 -t CHANGE_ME --push .
|
||||||
```
|
```
|
||||||
|
|
||||||
### Frontend
|
### Frontend
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# run in project7/frontend
|
cd project7/frontend
|
||||||
npm ci
|
npm ci
|
||||||
npm run build
|
npm run build
|
||||||
```
|
```
|
||||||
|
|
||||||
## Deployment Instructions
|
## Deployment Instructions
|
||||||
|
|
||||||
### Setup Cluster
|
### Setup Cluster
|
||||||
Deployment should work on any Kubernetes cluster. However, we are using 4 TalosOS virtual machines (1 control plane, 3 workers)
|
|
||||||
|
Deployment should work on any Kubernetes cluster. However, we are using 4 TalosOS virtual machines (1 control plane, 3
|
||||||
|
workers)
|
||||||
running on top of Proxmox VE.
|
running on top of Proxmox VE.
|
||||||
|
|
||||||
1) Create 4 VMs with TalosOS
|
1) Create at least 4 VMs with TalosOS (4 cores, 8 GB RAM, 200 GB disk)
|
||||||
2) Install talosctl for your OS: https://docs.siderolabs.com/talos/v1.10/getting-started/talosctl
|
2) Install talosctl for your OS: https://docs.siderolabs.com/talos/v1.10/getting-started/talosctl
|
||||||
3) Generate Talos config
|
3) Generate Talos config
|
||||||
```bash
|
4) Navigate to tofu directory
|
||||||
# TODO: add commands
|
|
||||||
```
|
|
||||||
4) Edit the generated worker.yaml
|
|
||||||
- add google container registry mirror
|
|
||||||
- add modules from config generator
|
|
||||||
- add extramounts for persistent storage
|
|
||||||
- add kernel modules
|
|
||||||
|
|
||||||
5) Apply the config to the VMs
|
|
||||||
```bash
|
```bash
|
||||||
#TODO: add config apply commands
|
cd 7project/tofu
|
||||||
|
```
|
||||||
|
|
||||||
|
5) Set IP addresses in environment variables
|
||||||
|
|
||||||
|
```bash
|
||||||
|
CONTROL_PLANE_IP=<control-plane-ip>
|
||||||
|
WORKER1_IP=<worker1-ip>
|
||||||
|
WORKER2_IP=<worker2-ip>
|
||||||
|
WORKER3_IP=<worker3-ip>
|
||||||
|
WORKER4_IP=<worker4-ip>
|
||||||
|
....
|
||||||
```
|
```
|
||||||
|
|
||||||
6) Verify the cluster is up
|
6) Create config files
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
# change my-cluster to your desired cluster name
|
||||||
|
talosctl gen config my-cluster https://$CONTROL_PLANE_IP:6443
|
||||||
```
|
```
|
||||||
|
|
||||||
7) Export kubeconfig
|
7) Edit the generated configs
|
||||||
```bash
|
|
||||||
# TODO: add export command
|
Apply the following changes to `worker.yaml`:
|
||||||
|
|
||||||
|
1) Add mounts for persistent storage to `machine.kubelet.extraMounts` section:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
extraMounts:
|
||||||
|
- destination: /var/lib/longhorn
|
||||||
|
type: bind
|
||||||
|
source: /var/lib/longhorn
|
||||||
|
options:
|
||||||
|
- bind
|
||||||
|
- rshared
|
||||||
|
- rw
|
||||||
```
|
```
|
||||||
|
|
||||||
|
2) Change `machine.install.image` to image with extra modules:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
image: factory.talos.dev/metal-installer/88d1f7a5c4f1d3aba7df787c448c1d3d008ed29cfb34af53fa0df4336a56040b:v1.11.1
|
||||||
|
```
|
||||||
|
|
||||||
|
or you can use latest image generated at https://factory.talos.dev with following options:
|
||||||
|
|
||||||
|
- Bare-metal machine
|
||||||
|
- your Talos OS version
|
||||||
|
- amd64 architecture
|
||||||
|
- siderolabs/iscsi-tools
|
||||||
|
- siderolabs/util-linux-tools
|
||||||
|
- (Optionally) siderolabs/qemu-guest-agent
|
||||||
|
|
||||||
|
Then copy "Initial Installation" value and paste it to the image field.
|
||||||
|
|
||||||
|
3) Add docker registry mirror to `machine.registries.mirrors` section:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
registries:
|
||||||
|
mirrors:
|
||||||
|
docker.io:
|
||||||
|
endpoints:
|
||||||
|
- https://mirror.gcr.io
|
||||||
|
- https://registry-1.docker.io
|
||||||
|
```
|
||||||
|
|
||||||
|
8) Apply configs to the VMs
|
||||||
|
|
||||||
|
```bash
|
||||||
|
talosctl apply-config --insecure --nodes $CONTROL_PLANE_IP --file controlplane.yaml
|
||||||
|
talosctl apply-config --insecure --nodes $WORKER1_IP --file worker.yaml
|
||||||
|
talosctl apply-config --insecure --nodes $WORKER2_IP --file worker.yaml
|
||||||
|
talosctl apply-config --insecure --nodes $WORKER3_IP --file worker.yaml
|
||||||
|
talosctl apply-config --insecure --nodes $WORKER4_IP --file worker.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
9) Bootstrap the cluster and retrieve kubeconfig
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export TALOSCONFIG=$(pwd)/talosconfig
|
||||||
|
talosctl config endpoint https://$CONTROL_PLANE_IP:6443
|
||||||
|
talosctl config node $CONTROL_PLANE_IP
|
||||||
|
|
||||||
|
talosctl bootstrap
|
||||||
|
|
||||||
|
talosctl kubeconfig .
|
||||||
|
```
|
||||||
|
|
||||||
|
You can now use k8s client like https://headlamp.dev/ with the generated kubeconfig file.
|
||||||
|
|
||||||
|
### Install base services to the cluster
|
||||||
|
|
||||||
|
1) Copy and edit variables
|
||||||
|
|
||||||
### Install
|
|
||||||
1) Install base services to cluster
|
|
||||||
```bash
|
```bash
|
||||||
cd tofu
|
|
||||||
# copy and edit variables
|
|
||||||
cp terraform.tfvars.example terraform.tfvars
|
cp terraform.tfvars.example terraform.tfvars
|
||||||
# authenticate to your cluster/cloud as needed, then:
|
```
|
||||||
|
- `metallb_ip_range` - set to range available in your network for load balancer services
|
||||||
|
- `mariadb_password` - password for internal mariadb user
|
||||||
|
- `mariadb_root_password` - password for root user
|
||||||
|
- `mariadb_user_name` - username for admin user
|
||||||
|
- `mariadb_user_host` - allowed hosts for admin user
|
||||||
|
- `mariadb_user_password` - password for admin user
|
||||||
|
- `metallb_maxscale_ip`, `metallb_service_ip`, `metallb_primary_ip`, `metallb_secondary_ip` - IPs for database
|
||||||
|
cluster,
|
||||||
|
set them to static IPs from the `metallb_ip_range`
|
||||||
|
- `s3_enabled`, `s3_bucket`, `s3_region`, `s3_endpoint`, `s3_key_id`, `s3_key_secret` - S3 compatible storage for
|
||||||
|
backups (optional)
|
||||||
|
- `phpmyadmin_enabled` - set to false if you want to disable phpmyadmin
|
||||||
|
- `rabbitmq-password` - password for RabbitMQ
|
||||||
|
|
||||||
|
- `cloudflare_account_id` - your Cloudflare account ID
|
||||||
|
- `cloudflare_api_token` - your Cloudflare API token with permissions to manage tunnels and DNS
|
||||||
|
- `cloudflare_email` - your Cloudflare account email
|
||||||
|
- `cloudflare_tunnel_name` - name for the tunnel
|
||||||
|
- `cloudflare_domain` - your domain name managed in Cloudflare
|
||||||
|
|
||||||
|
2) Deploy without Cloudflare module first
|
||||||
|
|
||||||
|
```bash
|
||||||
tofu init
|
tofu init
|
||||||
tofu apply -exclude modules.cloudflare
|
tofu apply -exclude modules.cloudflare
|
||||||
|
```
|
||||||
|
|
||||||
|
3) Deploy rest of the modules
|
||||||
|
|
||||||
|
```bash
|
||||||
tofu apply
|
tofu apply
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Configure deployment
|
||||||
|
1) Create self-hosted runner with access to the cluster or make cluster publicly accessible
|
||||||
|
2) Change `jobs.deploy.runs-on` in `.github/workflows/deploy-prod.yml` and in `.github/workflows/deploy-pr.yaml` to your runner label
|
||||||
|
3) Add variables to GitHub in repository settings:
|
||||||
|
- `PROD_DOMAIN` - base domain for deployments (e.g. ltrk.cz)
|
||||||
|
- `DEV_FRONTEND_BASE_DOMAIN` - base domain for your cloudflare pages
|
||||||
|
4) Add secrets to GitHub in repository settings:
|
||||||
|
- CLOUDFLARE_ACCOUNT_ID - same as in tofu/terraform.tfvars
|
||||||
|
- CLOUDFLARE_API_TOKEN - same as in tofu/terraform.tfvars
|
||||||
|
- DOCKER_USER - your docker registry username
|
||||||
|
- DOCKER_PASSWORD - your docker registry password
|
||||||
|
- KUBE_CONFIG - content of your kubeconfig file for the cluster
|
||||||
|
- PROD_DB_PASSWORD - same as MARIADB_PASSWORD
|
||||||
|
- PROD_RABBITMQ_PASSWORD - same as rabbitmq-password in tofu/terraform.tfvars
|
||||||
|
- PROD_DB_ENCRYPTION_KEY - same as DB_ENCRYPTION_KEY
|
||||||
|
- MOJEID_CLIENT_ID
|
||||||
|
- MOJEID_CLIENT_SECRET
|
||||||
|
- BANKID_CLIENT_ID
|
||||||
|
- BANKID_CLIENT_SECRET
|
||||||
|
- CSAS_CLIENT_ID
|
||||||
|
- CSAS_CLIENT_SECRET
|
||||||
|
- SENTRY_DSN
|
||||||
|
- SMTP_HOST
|
||||||
|
- SMTP_PORT
|
||||||
|
- SMTP_USERNAME
|
||||||
|
- SMTP_PASSWORD
|
||||||
|
- SMTP_FROM
|
||||||
|
- UNIRATE_API_KEY
|
||||||
|
5) On Github open Actions tab, select "Deploy Prod" and run workflow manually
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: REMOVE I guess
|
||||||
2) Deploy the app using Helm
|
2) Deploy the app using Helm
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Set the namespace
|
# Set the namespace
|
||||||
kubectl create namespace myapp || true
|
kubectl create namespace myapp || true
|
||||||
@@ -243,57 +431,45 @@ helm upgrade --install myapp charts/myapp-chart \
|
|||||||
--set env.FRONTEND_URL="https://myapp.example.com" \
|
--set env.FRONTEND_URL="https://myapp.example.com" \
|
||||||
--set env.SECRET="CHANGE_ME_SECRET"
|
--set env.SECRET="CHANGE_ME_SECRET"
|
||||||
```
|
```
|
||||||
Adjust values to your registry and domain. The chart’s NOTES.txt includes additional examples.
|
|
||||||
|
|
||||||
3) Expose and access
|
|
||||||
- If using Cloudflare Tunnel or an ingress, configure DNS accordingly (see tofu/modules/cloudflare and deployment/tunnel.yaml).
|
|
||||||
- For quick testing without ingress:
|
|
||||||
```bash
|
|
||||||
kubectl -n myapp port-forward deploy/myapp-backend 8000:8000
|
|
||||||
kubectl -n myapp port-forward deploy/myapp-frontend 5173:80
|
|
||||||
```
|
|
||||||
|
|
||||||
### Verification
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Check pods
|
|
||||||
kubectl -n myapp get pods
|
|
||||||
|
|
||||||
# Backend health
|
|
||||||
curl -i http://127.0.0.1:8000/
|
|
||||||
# OpenAPI
|
|
||||||
open http://127.0.0.1:8000/docs
|
|
||||||
|
|
||||||
# Frontend (if port-forwarded)
|
|
||||||
open http://localhost:5173
|
|
||||||
```
|
|
||||||
|
|
||||||
## Testing Instructions
|
## Testing Instructions
|
||||||
The tests are located in 7project/backend/tests directory. All tests are run by GitHub actions on every pull request and push to main.
|
|
||||||
|
The tests are located in 7project/backend/tests directory. All tests are run by GitHub actions on every pull request and
|
||||||
|
push to main.
|
||||||
See the workflow [here](../.github/workflows/run-tests.yml).
|
See the workflow [here](../.github/workflows/run-tests.yml).
|
||||||
|
|
||||||
If you want to run the tests locally, the preferred way is to use a [bash script](backend/test-with-ephemeral-mariadb.sh)
|
If you want to run the tests locally, the preferred way is to use a [bash script](backend/test-with-ephemeral-mariadb.sh)
|
||||||
that will start a [test DB container](backend/docker-compose.test.yml) and remove it afterward.
|
that will start a [test DB container](backend/docker-compose.test.yml) and remove it afterward.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
cd 7project/backend
|
cd 7project/backend
|
||||||
bash test-with-ephemeral-mariadb.sh
|
bash test-with-ephemeral-mariadb.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
### Unit Tests
|
### Unit Tests
|
||||||
|
|
||||||
There are only 5 basic unit tests, since our services logic is very simple
|
There are only 5 basic unit tests, since our services logic is very simple
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
bash test-with-ephemeral-mariadb.sh --only-unit
|
bash test-with-ephemeral-mariadb.sh --only-unit
|
||||||
```
|
```
|
||||||
|
|
||||||
### Integration Tests
|
### Integration Tests
|
||||||
|
|
||||||
There are 9 integration tests, testing the individual backend API logic
|
There are 9 integration tests, testing the individual backend API logic
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
bash test-with-ephemeral-mariadb.sh --only-integration
|
bash test-with-ephemeral-mariadb.sh --only-integration
|
||||||
```
|
```
|
||||||
|
|
||||||
### End-to-End Tests
|
### End-to-End Tests
|
||||||
|
|
||||||
There are 7 e2e tests, testing more complex app logic
|
There are 7 e2e tests, testing more complex app logic
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
bash test-with-ephemeral-mariadb.sh --only-e2e
|
bash test-with-ephemeral-mariadb.sh --only-e2e
|
||||||
```
|
```
|
||||||
@@ -378,18 +554,18 @@ curl -H "Authorization: Bearer $TOKEN" http://127.0.0.1:8000/authenticated-route
|
|||||||
> This information is used for individual grading.
|
> This information is used for individual grading.
|
||||||
> Link to the specific commit on GitHub for each contribution.
|
> Link to the specific commit on GitHub for each contribution.
|
||||||
|
|
||||||
| Task/Component | Assigned To | Status | Time Spent | Difficulty | Notes |
|
| Task/Component | Assigned To | Status | Time Spent | Difficulty | Notes |
|
||||||
|-----------------------------------------------------------------------|-------------| ------------- |------------|------------| ----------- |
|
|-------------------------------------------------------------------------------------------------------------------|-------------|----------------|------------|------------|-------------|
|
||||||
| [Project Setup & Repository](https://github.com/dat515-2025/Group-8#) | Lukas | ✅ Complete | [X hours] | Medium | [Any notes] |
|
| [Project Setup & Repository](https://github.com/dat515-2025/Group-8#) | Lukas | ✅ Complete | [X hours] | Medium | [Any notes] |
|
||||||
| [Design Document](https://github.com/dat515-2025/Group-8/blob/main/6design/design.md) | Both | ✅ Complete | 4 Hours | Easy | [Any notes] |
|
| [Design Document](https://github.com/dat515-2025/Group-8/blob/main/6design/design.md) | Both | ✅ Complete | 4 Hours | Easy | [Any notes] |
|
||||||
| [Backend API Development](https://github.com/dat515-2025/Group-8/tree/main/7project/backend/app/api) | Dejan | ✅ Complete | 12 hours | Medium | [Any notes] |
|
| [Backend API Development](https://github.com/dat515-2025/Group-8/tree/main/7project/backend/app/api) | Dejan | ✅ Complete | 12 hours | Medium | [Any notes] |
|
||||||
| [Database Setup & Models](https://github.com/dat515-2025/Group-8/tree/main/7project/backend/app/models) | Lukas | 🔄 In Progress | [X hours] | Medium | [Any notes] |
|
| [Database Setup & Models](https://github.com/dat515-2025/Group-8/tree/main/7project/backend/app/models) | Lukas | 🔄 In Progress | [X hours] | Medium | [Any notes] |
|
||||||
| [Frontend Development](https://github.com/dat515-2025/Group-8/tree/main/7project/frontend) | Dejan | ✅ Complete | 17 hours | Medium | [Any notes] |
|
| [Frontend Development](https://github.com/dat515-2025/Group-8/tree/main/7project/frontend) | Dejan | ✅ Complete | 17 hours | Medium | [Any notes] |
|
||||||
| [Docker Configuration](https://github.com/dat515-2025/Group-8/blob/main/7project/compose.yml) | Lukas | ✅ Complete | [X hours] | Easy | [Any notes] |
|
| [Docker Configuration](https://github.com/dat515-2025/Group-8/blob/main/7project/compose.yml) | Lukas | ✅ Complete | [X hours] | Easy | [Any notes] |
|
||||||
| [Cloud Deployment](https://github.com/dat515-2025/Group-8/blob/main/7project/deployment/app-demo-deployment.yaml) | Lukas | ✅ Complete | [X hours] | Hard | [Any notes] |
|
| [Cloud Deployment](https://github.com/dat515-2025/Group-8/blob/main/7project/deployment/app-demo-deployment.yaml) | Lukas | ✅ Complete | [X hours] | Hard | [Any notes] |
|
||||||
| [Testing Implementation](https://github.com/dat515-2025/group-name) | Dejan | ✅ Complete | 16 hours | Medium | [Any notes] |
|
| [Testing Implementation](https://github.com/dat515-2025/group-name) | Dejan | ✅ Complete | 16 hours | Medium | [Any notes] |
|
||||||
| [Documentation](https://github.com/dat515-2025/group-name) | Both | 🔄 In Progress | [X hours] | Easy | [Any notes] |
|
| [Documentation](https://github.com/dat515-2025/group-name) | Both | 🔄 In Progress | [X hours] | Easy | [Any notes] |
|
||||||
| [Presentation Video](https://github.com/dat515-2025/group-name) | Both | ❌ Not Started | [X hours] | Medium | [Any notes] |
|
| [Presentation Video](https://github.com/dat515-2025/group-name) | Both | ❌ Not Started | [X hours] | Medium | [Any notes] |
|
||||||
|
|
||||||
**Legend**: ✅ Complete | 🔄 In Progress | ⏳ Pending | ❌ Not Started
|
**Legend**: ✅ Complete | 🔄 In Progress | ⏳ Pending | ❌ Not Started
|
||||||
|
|
||||||
@@ -423,7 +599,6 @@ curl -H "Authorization: Bearer $TOKEN" http://127.0.0.1:8000/authenticated-route
|
|||||||
| 4.11 to 6.11 | Frontend | 6 | Fixes, Improved UI, added support for mobile devices |
|
| 4.11 to 6.11 | Frontend | 6 | Fixes, Improved UI, added support for mobile devices |
|
||||||
| **Total** | | **63** | |
|
| **Total** | | **63** | |
|
||||||
|
|
||||||
|
|
||||||
### Group Total: [XXX.X] hours
|
### Group Total: [XXX.X] hours
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|||||||
@@ -1,5 +1,3 @@
|
|||||||
# Example terraform.tfvars for MariaDB and MetalLB
|
|
||||||
|
|
||||||
metallb_ip_range = "10.80.0.100-10.80.0.240"
|
metallb_ip_range = "10.80.0.100-10.80.0.240"
|
||||||
|
|
||||||
# Secret configuration (use strong passwords; do not commit real secrets)
|
# Secret configuration (use strong passwords; do not commit real secrets)
|
||||||
@@ -11,13 +9,19 @@ mariadb_user_name = "example_user"
|
|||||||
mariadb_user_host = "%"
|
mariadb_user_host = "%"
|
||||||
mariadb_user_password = "example_user_password"
|
mariadb_user_password = "example_user_password"
|
||||||
|
|
||||||
# MetalLB IPs for services (optional)
|
# MetalLB IPs for services
|
||||||
metallb_maxscale_ip = "10.80.0.219"
|
metallb_maxscale_ip = "10.80.0.219"
|
||||||
metallb_service_ip = "10.80.0.120"
|
metallb_service_ip = "10.80.0.120"
|
||||||
metallb_primary_ip = "10.80.0.130"
|
metallb_primary_ip = "10.80.0.130"
|
||||||
metallb_secondary_ip = "10.80.0.131"
|
metallb_secondary_ip = "10.80.0.131"
|
||||||
|
|
||||||
# phpMyAdmin toggle
|
s3_enabled = false
|
||||||
|
s3_bucket = "cluster"
|
||||||
|
s3_region = "us-east-1"
|
||||||
|
s3_endpoint = "your.s3.endpoint.example"
|
||||||
|
s3_key_id = "your_s3_key_id"
|
||||||
|
s3_key_secret = "your_s3_key_secret"
|
||||||
|
|
||||||
phpmyadmin_enabled = true
|
phpmyadmin_enabled = true
|
||||||
|
|
||||||
cloudflare_account_id = "CHANGE_ME"
|
cloudflare_account_id = "CHANGE_ME"
|
||||||
@@ -26,4 +30,5 @@ cloudflare_email = "CHANGE_ME"
|
|||||||
cloudflare_tunnel_name = "CHANGE_ME"
|
cloudflare_tunnel_name = "CHANGE_ME"
|
||||||
cloudflare_domain = "CHANGE_ME"
|
cloudflare_domain = "CHANGE_ME"
|
||||||
|
|
||||||
|
rabbitmq-password = "CHANGE_ME"
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user