mirror of
https://github.com/dat515-2025/Group-8.git
synced 2026-03-22 15:12:08 +01:00
Compare commits
27 Commits
merge/upda
...
51-refacto
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f0c28ba9e1 | ||
|
|
b560c07d62 | ||
|
|
f0b1452e30 | ||
| 6effb2793a | |||
|
|
ba7798259c | ||
| deb67f421e | |||
| 74557eeea8 | |||
| 2e0619d03f | |||
| 31add42d6d | |||
| 4de79169a2 | |||
| 59d53967b0 | |||
| f3086f8c73 | |||
|
|
fd437b1caf | ||
| 96ebc27001 | |||
|
|
922651fdbf | ||
|
|
e164b185e0 | ||
|
|
186b4fd09a | ||
|
|
280d495335 | ||
|
|
e73233c90a | ||
|
|
aade78bf3f | ||
|
|
50e489a8e0 | ||
|
|
1679abb71f | ||
| 573404dead | |||
| d57dd82a64 | |||
| 50f37c1161 | |||
| ae22d2ee5f | |||
| 509608f8c9 |
2
.github/workflows/build-image.yaml
vendored
2
.github/workflows/build-image.yaml
vendored
@@ -15,7 +15,7 @@ on:
|
||||
context:
|
||||
description: "Docker build context path"
|
||||
required: false
|
||||
default: "7project/backend"
|
||||
default: "7project/src/backend"
|
||||
type: string
|
||||
pr_number:
|
||||
description: "PR number (required when mode=pr)"
|
||||
|
||||
10
.github/workflows/deploy-pr.yaml
vendored
10
.github/workflows/deploy-pr.yaml
vendored
@@ -21,7 +21,7 @@ jobs:
|
||||
with:
|
||||
mode: pr
|
||||
image_repo: lukastrkan/cc-app-demo
|
||||
context: 7project/backend
|
||||
context: 7project/src/backend
|
||||
pr_number: ${{ github.event.pull_request.number }}
|
||||
secrets: inherit
|
||||
|
||||
@@ -33,7 +33,7 @@ jobs:
|
||||
runner: vhs
|
||||
mode: pr
|
||||
pr_number: ${{ github.event.pull_request.number }}
|
||||
base_domain: ${{ vars.DEV_BASE_DOMAIN }}
|
||||
base_domain: ${{ vars.PROD_DOMAIN }}
|
||||
secrets: inherit
|
||||
|
||||
frontend:
|
||||
@@ -77,7 +77,7 @@ jobs:
|
||||
|
||||
- name: Helm upgrade/install PR preview
|
||||
env:
|
||||
DEV_BASE_DOMAIN: ${{ secrets.BASE_DOMAIN }}
|
||||
DEV_BASE_DOMAIN: ${{ vars.BASE_DOMAIN }}
|
||||
RABBITMQ_PASSWORD: ${{ secrets.PROD_RABBITMQ_PASSWORD }}
|
||||
DB_PASSWORD: ${{ secrets.PROD_DB_PASSWORD }}
|
||||
DIGEST: ${{ needs.build.outputs.digest }}
|
||||
@@ -90,9 +90,9 @@ jobs:
|
||||
PR=${{ github.event.pull_request.number }}
|
||||
RELEASE=myapp-pr-$PR
|
||||
NAMESPACE=pr-$PR
|
||||
helm upgrade --install "$RELEASE" ./7project/charts/myapp-chart \
|
||||
helm upgrade --install "$RELEASE" ./7project/src/charts/myapp-chart \
|
||||
-n "$NAMESPACE" --create-namespace \
|
||||
-f 7project/charts/myapp-chart/values-dev.yaml \
|
||||
-f 7project/src/charts/myapp-chart/values-dev.yaml \
|
||||
--set prNumber="$PR" \
|
||||
--set deployment="pr-$PR" \
|
||||
--set domain="$DOMAIN" \
|
||||
|
||||
14
.github/workflows/deploy-prod.yaml
vendored
14
.github/workflows/deploy-prod.yaml
vendored
@@ -4,9 +4,9 @@ on:
|
||||
push:
|
||||
branches: [ "main" ]
|
||||
paths:
|
||||
- 7project/backend/**
|
||||
- 7project/frontend/**
|
||||
- 7project/charts/myapp-chart/**
|
||||
- ../../7project/src/backend/**
|
||||
- ../../7project/src/frontend/**
|
||||
- ../../7project/src/charts/myapp-chart/**
|
||||
- .github/workflows/deploy-prod.yaml
|
||||
- .github/workflows/build-image.yaml
|
||||
- .github/workflows/frontend-pages.yml
|
||||
@@ -27,15 +27,17 @@ jobs:
|
||||
|
||||
build:
|
||||
name: Build and push image (reusable)
|
||||
needs: [test]
|
||||
uses: ./.github/workflows/build-image.yaml
|
||||
with:
|
||||
mode: prod
|
||||
image_repo: lukastrkan/cc-app-demo
|
||||
context: 7project/backend
|
||||
context: 7project/src/backend
|
||||
secrets: inherit
|
||||
|
||||
get_urls:
|
||||
name: Generate Production URLs
|
||||
needs: [test]
|
||||
uses: ./.github/workflows/url_generator.yml
|
||||
with:
|
||||
mode: prod
|
||||
@@ -101,9 +103,9 @@ jobs:
|
||||
SMTP_FROM: ${{ secrets.SMTP_FROM }}
|
||||
UNIRATE_API_KEY: ${{ secrets.UNIRATE_API_KEY }}
|
||||
run: |
|
||||
helm upgrade --install myapp ./7project/charts/myapp-chart \
|
||||
helm upgrade --install myapp ./7project/src/charts/myapp-chart \
|
||||
-n prod --create-namespace \
|
||||
-f 7project/charts/myapp-chart/values-prod.yaml \
|
||||
-f 7project/src/charts/myapp-chart/values-prod.yaml \
|
||||
--set deployment="prod" \
|
||||
--set domain="$DOMAIN" \
|
||||
--set domain_scheme="$DOMAIN_SCHEME" \
|
||||
|
||||
6
.github/workflows/frontend-pages.yml
vendored
6
.github/workflows/frontend-pages.yml
vendored
@@ -35,7 +35,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
defaults:
|
||||
run:
|
||||
working-directory: 7project/frontend
|
||||
working-directory: 7project/src/frontend
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
@@ -45,7 +45,7 @@ jobs:
|
||||
with:
|
||||
node-version: '20'
|
||||
cache: 'npm'
|
||||
cache-dependency-path: 7project/frontend/package-lock.json
|
||||
cache-dependency-path: 7project/src/frontend/package-lock.json
|
||||
|
||||
- name: Install dependencies
|
||||
run: npm ci
|
||||
@@ -61,7 +61,7 @@ jobs:
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: frontend-dist
|
||||
path: 7project/frontend/dist
|
||||
path: 7project/src/frontend/dist
|
||||
|
||||
deploy:
|
||||
name: Deploy to Cloudflare Pages
|
||||
|
||||
10
.github/workflows/run-tests.yml
vendored
10
.github/workflows/run-tests.yml
vendored
@@ -46,21 +46,21 @@ jobs:
|
||||
|
||||
- name: Add test dependencies to requirements
|
||||
run: |
|
||||
echo "pytest==8.4.2" >> ./7project/backend/requirements.txt
|
||||
echo "pytest-asyncio==1.2.0" >> ./7project/backend/requirements.txt
|
||||
echo "pytest==8.4.2" >> ./7project/src/backend/requirements.txt
|
||||
echo "pytest-asyncio==1.2.0" >> ./7project/src/backend/requirements.txt
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install -r ./7project/backend/requirements.txt
|
||||
pip install -r ./7project/src/backend/requirements.txt
|
||||
|
||||
- name: Run Alembic migrations
|
||||
run: |
|
||||
alembic upgrade head
|
||||
working-directory: ./7project/backend
|
||||
working-directory: ./7project/src/backend
|
||||
|
||||
- name: Run tests with pytest
|
||||
env:
|
||||
PYTEST_RUN_CONFIG: "True"
|
||||
run: pytest
|
||||
working-directory: ./7project/backend
|
||||
working-directory: ./7project/src/backend
|
||||
8
.idea/.gitignore
generated
vendored
Normal file
8
.idea/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
# Default ignored files
|
||||
/shelf/
|
||||
/workspace.xml
|
||||
# Editor-based HTTP Client requests
|
||||
/httpRequests/
|
||||
# Datasource local storage ignored files
|
||||
/dataSources/
|
||||
/dataSources.local.xml
|
||||
16
7project/.gitignore
vendored
16
7project/.gitignore
vendored
@@ -1,8 +1,8 @@
|
||||
/tofu/controlplane.yaml
|
||||
/tofu/kubeconfig
|
||||
/tofu/talosconfig
|
||||
/tofu/terraform.tfstate
|
||||
/tofu/terraform.tfstate.backup
|
||||
/tofu/worker.yaml
|
||||
/tofu/.terraform.lock.hcl
|
||||
/tofu/.terraform/
|
||||
/src/tofu/controlplane.yaml
|
||||
/src/tofu/kubeconfig
|
||||
/src/tofu/talosconfig
|
||||
/src/tofu/terraform.tfstate
|
||||
/src/tofu/terraform.tfstate.backup
|
||||
/src/tofu/worker.yaml
|
||||
/src/tofu/.terraform.lock.hcl
|
||||
/src/tofu/.terraform/
|
||||
|
||||
8
7project/.idea/.gitignore
generated
vendored
Normal file
8
7project/.idea/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
# Default ignored files
|
||||
/shelf/
|
||||
/workspace.xml
|
||||
# Editor-based HTTP Client requests
|
||||
/httpRequests/
|
||||
# Datasource local storage ignored files
|
||||
/dataSources/
|
||||
/dataSources.local.xml
|
||||
@@ -1,5 +0,0 @@
|
||||
export const BACKEND_URL: string =
|
||||
import.meta.env.VITE_BACKEND_URL ?? '';
|
||||
|
||||
export const VITE_UNIRATE_API_KEY: string =
|
||||
import.meta.env.VITE_UNIRATE_API_KEY ?? 'wYXMiA0bz8AVRHtiS9hbKIr4VP3k5Qff8XnQdKQM45YM3IwFWP6y73r3KMkv1590';
|
||||
@@ -1,9 +1,9 @@
|
||||
# Personal finance tracker
|
||||
|
||||
> **Instructions**:
|
||||
<!--- **Instructions**:
|
||||
> This template provides the structure for your project report.
|
||||
> Replace the placeholder text with your actual content.
|
||||
> Remove instructions that are not relevant for your project, but leave the headings along with a (NA) label.
|
||||
> Remove instructions that are not relevant for your project, but leave the headings along with a (NA) label. -->
|
||||
|
||||
## Project Overview
|
||||
|
||||
@@ -12,272 +12,446 @@
|
||||
**Group Members**:
|
||||
|
||||
- 289229, Lukáš Trkan, lukastrkan
|
||||
- 289258, Dejan Ribarovski, derib2613, ribardej
|
||||
- 289258, Dejan Ribarovski, ribardej (derib2613)
|
||||
|
||||
**Brief Description**:
|
||||
Our application is a finance tracker, so a person can easily track his cash flow
|
||||
through multiple bank accounts. Person can label transactions with custom categories
|
||||
and later filter by them.
|
||||
Our application allows users to easily track their cash flow
|
||||
through multiple bank accounts. Users can label their transactions with custom categories that can be later used for
|
||||
filtering and visualization. New transactions are automatically fetched in the background.
|
||||
|
||||
## Architecture Overview
|
||||
Our system is a full‑stack web application composed of a React frontend, a FastAPI backend, a PostgreSQL database, and asynchronous background workers powered by Celery with RabbitMQ. Redis is available for caching/kv and may be used by Celery as a result backend. The backend exposes REST endpoints for authentication (email/password and OAuth), users, categories, and transactions. A thin controller layer (FastAPI routers) lives under app/api. Infrastructure for Kubernetes is provided via OpenTofu (Terraform‑compatible) modules and the application is packaged via a Helm chart.
|
||||
|
||||
Our system is a full‑stack web application composed of a React frontend, a FastAPI backend,
|
||||
a MariaDB database with MaxScale, and asynchronous background workers powered by Celery with RabbitMQ.
|
||||
The backend exposes REST endpoints for authentication (email/password and OAuth), users, categories,
|
||||
transactions, exchange rates and bank APIs. Infrastructure for Kubernetes is managed via Terraform/OpenTofu and
|
||||
the application is packaged via a Helm chart. This all is deployed on private TalosOS cluster running on Proxmox VE with
|
||||
CI/CD and with public access over Cloudflare tunnels. Static files for frontend are served via Cloudflare pages.
|
||||
Other services deployed in the cluster includes Longhorn for persistent storage, Prometheus with Grafana for monitoring.
|
||||
|
||||
### High-Level Architecture
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
proc_queue[Message Queue] --> proc_queue_worker[Worker Service]
|
||||
proc_queue_worker --> ext_mail[(Email Service)]
|
||||
proc_cron[Task planner] --> proc_queue
|
||||
proc_queue_worker --> ext_bank[(Bank API)]
|
||||
proc_queue_worker --> db
|
||||
client[Client/Frontend] <--> svc[Backend API]
|
||||
flowchart TB
|
||||
n3(("User")) <--> client["Frontend"]
|
||||
proc_queue["Message Queue"] --> proc_queue_worker["Worker Service"]
|
||||
proc_queue_worker -- SMTP --> ext_mail[("Email Service")]
|
||||
proc_queue_worker <-- HTTP request/response --> ext_bank[("Bank API")]
|
||||
proc_queue_worker <--> db[("Database")]
|
||||
proc_cron["Cron"] <-- HTTP request/response --> svc["Backend API"]
|
||||
svc --> proc_queue
|
||||
svc <--> db[(Database)]
|
||||
n2["Cloudflare tunnel"] <-- HTTP request/response --> svc
|
||||
svc <--> db
|
||||
svc <-- HTTP request/response --> api[("UniRate API")]
|
||||
client <-- HTTP request/response --> n2
|
||||
```
|
||||
|
||||
The workflow works in the following way:
|
||||
|
||||
- Client connects to the frontend. After login, frontend automatically fetches the stored transactions from
|
||||
the database via the backend API
|
||||
- When the client opts for fetching new transactions via the Bank API, the backend delegates the task
|
||||
to a background worker service via the Message queue.
|
||||
the database via the backend API and currency rates from UniRate API.
|
||||
- When the client opts for fetching new transactions via the Bank API, cron will trigger periodic fetching
|
||||
using background worker.
|
||||
- After successful load, these transactions are stored to the database and displayed to the client
|
||||
- There is also a Task planner, that executes periodic tasks, like fetching new transactions automatically from the Bank API
|
||||
|
||||
### Features
|
||||
|
||||
- The stored transactions are encrypted in the DB for security reasons.
|
||||
- For every pull request the full APP is deployed on a separate URL and the tests are run by github CI/CD
|
||||
- On every push to main, the production app is automatically updated
|
||||
- UI is responsive for mobile devices
|
||||
- Slow operations (emails, transactions fetching) are handled
|
||||
in the background by Celery workers.
|
||||
- App is monitored using prometheus metrics endpoint and metrics are shown in Grafana dashboard.
|
||||
|
||||
### Components
|
||||
|
||||
- Frontend (frontend/): React + TypeScript app built with Vite. Talks to the backend via REST, handles login/registration, shows latest transactions, filtering, and allows adding transactions.
|
||||
- Backend API (backend/app): FastAPI app with routers under app/api for auth, categories, and transactions. Uses FastAPI Users for auth (JWT + OAuth), SQLAlchemy ORM, and Pydantic v2 schemas.
|
||||
- Worker service (backend/app/workers): Celery worker handling asynchronous tasks (e.g., sending verification emails, future background processing).
|
||||
- Database (PostgreSQL): Persists users, categories, transactions; schema managed by Alembic migrations.
|
||||
- Message Queue (RabbitMQ): Transports background jobs from the API to the worker.
|
||||
- Cache/Result Store (Redis): Available for caching or Celery result backend.
|
||||
- Infrastructure as Code (tofu/): OpenTofu modules provisioning cluster services (RabbitMQ, Redis, Argo CD, cert-manager, Cloudflare tunnel, etc.).
|
||||
- Frontend (frontend/): React + TypeScript app built with Vite. Talks to the backend via REST, handles
|
||||
login/registration, shows latest transactions, filtering, and allows adding transactions.
|
||||
- Backend API (backend/app): FastAPI app with routers under app/api for auth, users, categories, transactions, exchange
|
||||
rates and bankAPI. Uses FastAPI Users for auth (JWT + OAuth), SQLAlchemy ORM, and Pydantic v2 schemas.
|
||||
- Worker service (backend/app/workers): Celery worker handling background tasks (emails, transactions fetching).
|
||||
- Database (MariaDB with Maxscale): Persists users, categories, transactions; schema managed by Alembic migrations.
|
||||
- Message Queue (RabbitMQ): Queues background tasks for Celery workers.
|
||||
- Infrastructure as Code (tofu/): OpenTofu modules provisioning cluster services (RabbitMQ, Redis, Cloudflare tunnel,
|
||||
etc.).
|
||||
- Deployment Chart (charts/myapp-chart/): Helm chart to deploy the application to Kubernetes.
|
||||
|
||||
### Technologies Used
|
||||
|
||||
- Backend: Python, FastAPI, FastAPI Users, SQLAlchemy, Pydantic, Alembic, Celery
|
||||
- Frontend: React, TypeScript, Vite
|
||||
- Database: MariaDB (Maxscale)
|
||||
- Database: MariaDB with Maxscale
|
||||
- Background jobs: RabbitMQ, Celery
|
||||
- Containerization/Orchestration: Docker, Docker Compose (dev), Kubernetes, Helm
|
||||
- IaC/Platform: Proxmox, Talos, Cloudflare pages, OpenTofu (Terraform), cert-manager, MetalLB, Cloudflare Tunnel, Prometheus, Loki
|
||||
- IaC/Platform: Proxmox, Talos, Cloudflare pages, OpenTofu (Terraform), cert-manager, MetalLB, Cloudflare Tunnel,
|
||||
Prometheus, Loki
|
||||
|
||||
## Prerequisites
|
||||
|
||||
### System Requirements
|
||||
|
||||
- Operating System (dev): Linux, macOS, or Windows with Docker support
|
||||
- Operating System (prod): Linux with kubernetes
|
||||
- Minimum RAM: 4 GB (8 GB recommended for running backend, frontend, and database together)
|
||||
- Storage: 4 GB free (Docker images may require additional space)
|
||||
#### Development
|
||||
|
||||
- Minimum RAM: 8 GB
|
||||
- Storage: 10 GB+ free
|
||||
|
||||
#### Production
|
||||
|
||||
- 1 + 4 nodes
|
||||
- CPU: 4 cores
|
||||
- RAM: 8 GB
|
||||
- Storage: 200 GB
|
||||
|
||||
### Required Software
|
||||
|
||||
- Docker Desktop or Docker Engine
|
||||
#### Development
|
||||
|
||||
- Docker
|
||||
- Docker Compose
|
||||
- Node.js and npm
|
||||
- Python 3.12+
|
||||
- Python 3.12
|
||||
- MariaDB 11
|
||||
- Helm 3.12+ and kubectl 1.29+
|
||||
|
||||
#### Production
|
||||
|
||||
##### Minimal:
|
||||
|
||||
- domain name with Cloudflare's nameservers - tunnel, pages
|
||||
- Kubernetes cluster
|
||||
- kubectl
|
||||
- Helm
|
||||
- OpenTofu
|
||||
|
||||
### Environment Variables (common)
|
||||
##### Our setup specifics:
|
||||
|
||||
# TODO: UPDATE
|
||||
- Backend: SECRET, FRONTEND_URL, BACKEND_URL, DATABASE_URL, RABBITMQ_URL, REDIS_URL
|
||||
- Proxmox VE
|
||||
- TalosOS cluster
|
||||
- talosctl
|
||||
- GitHub self-hosted runner with access to the cluster
|
||||
- TailScale for remote access to cluster
|
||||
|
||||
- OAuth vars (Backend): MOJEID_CLIENT_ID/SECRET, BANKID_CLIENT_ID/SECRET (optional)
|
||||
- Frontend: VITE_BACKEND_URL
|
||||
### Environment Variables
|
||||
|
||||
#### Backend
|
||||
|
||||
- `MOJEID_CLIENT_ID`, `MOJEID_CLIENT_SECRET` \- OAuth client ID and secret for
|
||||
MojeID - https://www.mojeid.cz/en/provider/
|
||||
- `BANKID_CLIENT_ID`, `BANKID_CLIENT_SECRET` \- OAuth client ID and secret for BankID - https://developer.bankid.cz/
|
||||
- `CSAS_CLIENT_ID`, `CSAS_CLIENT_SECRET` \- OAuth client ID and secret for Česká
|
||||
spořitelna - https://developers.erstegroup.com/docs/apis/bank.csas
|
||||
- `DATABASE_URL`(or `MARIADB_HOST`, `MARIADB_PORT`, `MARIADB_DB`, `MARIADB_USER`, `MARIADB_PASSWORD`) \- MariaDB
|
||||
connection details
|
||||
- `RABBITMQ_USERNAME`, `RABBITMQ_PASSWORD` \- credentials for RabbitMQ
|
||||
- `SENTRY_DSN` \- Sentry DSN for error reporting
|
||||
- `DB_ENCRYPTION_KEY` \- symmetric key for encrypting sensitive data in the database
|
||||
- `SMTP_HOST`, `SMTP_PORT`, `SMTP_USERNAME`, `SMTP_PASSWORD`, `SMTP_USE_TLS`, `SMTP_USE_SSL`, `SMTP_FROM` \- SMTP
|
||||
configuration (host, port, auth credentials, TLS/SSL options, sender).
|
||||
- `UNIRATE_API_KEY` \- API key for UniRate.
|
||||
|
||||
#### Frontend
|
||||
|
||||
- `VITE_BACKEND_URL` \- URL of the backend API
|
||||
|
||||
### Dependencies (key libraries)
|
||||
Backend: FastAPI, fastapi-users, SQLAlchemy, pydantic v2, Alembic, Celery, uvicorn
|
||||
|
||||
Backend: FastAPI, fastapi-users, SQLAlchemy, pydantic v2, Alembic, Celery, uvicorn, pytest
|
||||
Frontend: React, TypeScript, Vite
|
||||
|
||||
## Local development
|
||||
|
||||
You can run the project with Docker Compose and Python virtual environment for testing and dev purposes
|
||||
You can run the project with Docker Compose and Python virtual environment for testing and development purposes
|
||||
|
||||
### 1) Clone the Repository
|
||||
|
||||
```bash
|
||||
git clone https://github.com/dat515-2025/Group-8.git
|
||||
cd 7project
|
||||
cd Group-8/7project/src
|
||||
```
|
||||
|
||||
### 2) Install dependencies
|
||||
|
||||
Backend
|
||||
|
||||
```bash
|
||||
cd backend
|
||||
python3 -m venv .venv
|
||||
source .venv/bin/activate
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
Frontend
|
||||
|
||||
### 3) Run Docker containers
|
||||
|
||||
```bash
|
||||
# In 7project/frontend
|
||||
npm install
|
||||
cd ..
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
### 3) Manual Local Run
|
||||
### 4) Prepare the database
|
||||
|
||||
Backend
|
||||
```bash
|
||||
# From the 7project/ directory
|
||||
docker compose up --build
|
||||
# This starts: MariaDB, RabbitMQ
|
||||
|
||||
# Set environment variables (or create .env file)
|
||||
# TODO: fix
|
||||
export SECRET=CHANGE_ME_SECRET
|
||||
export FRONTEND_DOMAIN_SCHEME=http://localhost:5173
|
||||
export BANKID_CLIENT_ID=CHANGE_ME
|
||||
export BANKID_CLIENT_SECRET=CHANGE_ME
|
||||
export CSAS_CLIENT_ID=CHANGE_ME
|
||||
export CSAS_CLIENT_SECRET=CHANGE_ME
|
||||
export MOJEID_CLIENT_ID=CHANGE_ME
|
||||
export MOJEID_CLIENT_SECRET=CHANGE_ME
|
||||
# Apply DB migrations (Alembic)
|
||||
# From 7project
|
||||
bash upgrade_database.sh
|
||||
```
|
||||
|
||||
# Run API
|
||||
### 5) Run backend
|
||||
|
||||
```bash
|
||||
cd backend
|
||||
|
||||
#TODO: set env variables
|
||||
uvicorn app.app:fastApi --reload --host 0.0.0.0 --port 8000
|
||||
```
|
||||
|
||||
### 6) Run Celery worker (optional, in another terminal)
|
||||
|
||||
```bash
|
||||
cd Group-8/7project/backend
|
||||
source .venv/bin/activate
|
||||
celery -A app.celery_app.celery_app worker -l info
|
||||
```
|
||||
|
||||
Frontend
|
||||
### 7) Install frontend dependencies and run
|
||||
|
||||
```bash
|
||||
# Configure backend URL for dev
|
||||
echo 'VITE_BACKEND_URL=http://127.0.0.1:8000' > .env
|
||||
cd ../frontend
|
||||
npm i
|
||||
npm run dev
|
||||
# Open http://localhost:5173
|
||||
```
|
||||
|
||||
- Backend default: http://127.0.0.1:8000 (OpenAPI at /docs)
|
||||
- Frontend default: http://localhost:5173
|
||||
- Backend available at: http://127.0.0.1:8000 (OpenAPI at /docs)
|
||||
- Frontend available at: http://localhost:5173
|
||||
|
||||
## Build Instructions
|
||||
|
||||
### Backend
|
||||
|
||||
```bash
|
||||
# run in project7/backend
|
||||
docker buildx build --platform linux/amd64,linux/arm64 -t your_container_registry/your_name --push .
|
||||
cd 7project/backend
|
||||
# Don't forget to set the correct image tag with your registry and name
|
||||
# For example lukastrkan/cc-app-demo or gitea.ltrk.dev/lukas/cc-app-demo
|
||||
docker buildx build --platform linux/amd64,linux/arm64 -t CHANGE_ME --push .
|
||||
```
|
||||
|
||||
### Frontend
|
||||
|
||||
```bash
|
||||
# run in project7/frontend
|
||||
cd project7/frontend
|
||||
npm ci
|
||||
npm run build
|
||||
```
|
||||
|
||||
## Deployment Instructions
|
||||
|
||||
### Setup Cluster
|
||||
Deployment should work on any Kubernetes cluster. However, we are using 4 TalosOS virtual machines (1 control plane, 3 workers)
|
||||
|
||||
Deployment should work on any Kubernetes cluster. However, we are using 4 TalosOS virtual machines (1 control plane, 3
|
||||
workers)
|
||||
running on top of Proxmox VE.
|
||||
|
||||
1) Create 4 VMs with TalosOS
|
||||
1) Create at least 4 VMs with TalosOS (4 cores, 8 GB RAM, 200 GB disk)
|
||||
2) Install talosctl for your OS: https://docs.siderolabs.com/talos/v1.10/getting-started/talosctl
|
||||
3) Generate Talos config
|
||||
```bash
|
||||
# TODO: add commands
|
||||
```
|
||||
4) Edit the generated worker.yaml
|
||||
- add google container registry mirror
|
||||
- add modules from config generator
|
||||
- add extramounts for persistent storage
|
||||
- add kernel modules
|
||||
4) Navigate to tofu directory
|
||||
|
||||
5) Apply the config to the VMs
|
||||
```bash
|
||||
#TODO: add config apply commands
|
||||
cd 7project/tofu
|
||||
```
|
||||
|
||||
5) Set IP addresses in environment variables
|
||||
|
||||
```bash
|
||||
CONTROL_PLANE_IP=<control-plane-ip>
|
||||
WORKER1_IP=<worker1-ip>
|
||||
WORKER2_IP=<worker2-ip>
|
||||
WORKER3_IP=<worker3-ip>
|
||||
WORKER4_IP=<worker4-ip>
|
||||
....
|
||||
```
|
||||
|
||||
6) Verify the cluster is up
|
||||
6) Create config files
|
||||
|
||||
```bash
|
||||
# change my-cluster to your desired cluster name
|
||||
talosctl gen config my-cluster https://$CONTROL_PLANE_IP:6443
|
||||
```
|
||||
|
||||
7) Export kubeconfig
|
||||
```bash
|
||||
# TODO: add export command
|
||||
7) Edit the generated configs
|
||||
|
||||
Apply the following changes to `worker.yaml`:
|
||||
|
||||
1) Add mounts for persistent storage to `machine.kubelet.extraMounts` section:
|
||||
|
||||
```yaml
|
||||
extraMounts:
|
||||
- destination: /var/lib/longhorn
|
||||
type: bind
|
||||
source: /var/lib/longhorn
|
||||
options:
|
||||
- bind
|
||||
- rshared
|
||||
- rw
|
||||
```
|
||||
|
||||
2) Change `machine.install.image` to image with extra modules:
|
||||
|
||||
```yaml
|
||||
image: factory.talos.dev/metal-installer/88d1f7a5c4f1d3aba7df787c448c1d3d008ed29cfb34af53fa0df4336a56040b:v1.11.1
|
||||
```
|
||||
|
||||
or you can use latest image generated at https://factory.talos.dev with following options:
|
||||
|
||||
- Bare-metal machine
|
||||
- your Talos OS version
|
||||
- amd64 architecture
|
||||
- siderolabs/iscsi-tools
|
||||
- siderolabs/util-linux-tools
|
||||
- (Optionally) siderolabs/qemu-guest-agent
|
||||
|
||||
Then copy the "Initial Installation" value and paste it into the image field.
|
||||
|
||||
3) Add docker registry mirror to `machine.registries.mirrors` section:
|
||||
|
||||
```yaml
|
||||
registries:
|
||||
mirrors:
|
||||
docker.io:
|
||||
endpoints:
|
||||
- https://mirror.gcr.io
|
||||
- https://registry-1.docker.io
|
||||
```
|
||||
|
||||
8) Apply configs to the VMs
|
||||
|
||||
```bash
|
||||
talosctl apply-config --insecure --nodes $CONTROL_PLANE_IP --file controlplane.yaml
|
||||
talosctl apply-config --insecure --nodes $WORKER1_IP --file worker.yaml
|
||||
talosctl apply-config --insecure --nodes $WORKER2_IP --file worker.yaml
|
||||
talosctl apply-config --insecure --nodes $WORKER3_IP --file worker.yaml
|
||||
talosctl apply-config --insecure --nodes $WORKER4_IP --file worker.yaml
|
||||
```
|
||||
|
||||
9) Bootstrap the cluster and retrieve kubeconfig
|
||||
|
||||
```bash
|
||||
export TALOSCONFIG=$(pwd)/talosconfig
|
||||
talosctl config endpoint https://$CONTROL_PLANE_IP:6443
|
||||
talosctl config node $CONTROL_PLANE_IP
|
||||
|
||||
talosctl bootstrap
|
||||
|
||||
talosctl kubeconfig .
|
||||
```
|
||||
|
||||
You can now use k8s client like https://headlamp.dev/ with the generated kubeconfig file.
|
||||
|
||||
### Install base services to the cluster
|
||||
|
||||
1) Copy and edit variables
|
||||
|
||||
### Install
|
||||
1) Install base services to cluster
|
||||
```bash
|
||||
cd tofu
|
||||
# copy and edit variables
|
||||
cp terraform.tfvars.example terraform.tfvars
|
||||
# authenticate to your cluster/cloud as needed, then:
|
||||
```
|
||||
|
||||
- `metallb_ip_range` - set to range available in your network for load balancer services
|
||||
- `mariadb_password` - password for internal mariadb user
|
||||
- `mariadb_root_password` - password for root user
|
||||
- `mariadb_user_name` - username for admin user
|
||||
- `mariadb_user_host` - allowed hosts for admin user
|
||||
- `mariadb_user_password` - password for admin user
|
||||
- `metallb_maxscale_ip`, `metallb_service_ip`, `metallb_primary_ip`, `metallb_secondary_ip` - IPs for database
|
||||
cluster,
|
||||
set them to static IPs from the `metallb_ip_range`
|
||||
- `s3_enabled`, `s3_bucket`, `s3_region`, `s3_endpoint`, `s3_key_id`, `s3_key_secret` - S3 compatible storage for
|
||||
backups (optional)
|
||||
- `phpmyadmin_enabled` - set to false if you want to disable phpmyadmin
|
||||
- `rabbitmq-password` - password for RabbitMQ
|
||||
|
||||
- `cloudflare_account_id` - your Cloudflare account ID
|
||||
- `cloudflare_api_token` - your Cloudflare API token with permissions to manage tunnels and DNS
|
||||
- `cloudflare_email` - your Cloudflare account email
|
||||
- `cloudflare_tunnel_name` - name for the tunnel
|
||||
- `cloudflare_domain` - your domain name managed in Cloudflare
|
||||
|
||||
2) Deploy without Cloudflare module first
|
||||
|
||||
```bash
|
||||
tofu init
|
||||
tofu apply -exclude=module.cloudflare
|
||||
```
|
||||
|
||||
3) Deploy rest of the modules
|
||||
|
||||
```bash
|
||||
tofu apply
|
||||
```
|
||||
|
||||
2) Deploy the app using Helm
|
||||
```bash
|
||||
# Set the namespace
|
||||
kubectl create namespace myapp || true
|
||||
### Configure deployment
|
||||
|
||||
# Install/upgrade the chart with required values
|
||||
helm upgrade --install myapp charts/myapp-chart \
|
||||
-n myapp \
|
||||
-f charts/myapp-chart/values.yaml \
|
||||
--set image.backend.repository=myorg/myapp-backend \
|
||||
--set image.backend.tag=latest \
|
||||
--set env.BACKEND_URL="https://myapp.example.com" \
|
||||
--set env.FRONTEND_URL="https://myapp.example.com" \
|
||||
--set env.SECRET="CHANGE_ME_SECRET"
|
||||
```
|
||||
Adjust values to your registry and domain. The chart’s NOTES.txt includes additional examples.
|
||||
|
||||
3) Expose and access
|
||||
- If using Cloudflare Tunnel or an ingress, configure DNS accordingly (see tofu/modules/cloudflare and deployment/tunnel.yaml).
|
||||
- For quick testing without ingress:
|
||||
```bash
|
||||
kubectl -n myapp port-forward deploy/myapp-backend 8000:8000
|
||||
kubectl -n myapp port-forward deploy/myapp-frontend 5173:80
|
||||
```
|
||||
|
||||
### Verification
|
||||
|
||||
```bash
|
||||
# Check pods
|
||||
kubectl -n myapp get pods
|
||||
|
||||
# Backend health
|
||||
curl -i http://127.0.0.1:8000/
|
||||
# OpenAPI
|
||||
open http://127.0.0.1:8000/docs
|
||||
|
||||
# Frontend (if port-forwarded)
|
||||
open http://localhost:5173
|
||||
```
|
||||
1) Create self-hosted runner with access to the cluster or make cluster publicly accessible
|
||||
2) Change `jobs.deploy.runs-on` in `.github/workflows/deploy-prod.yml` and in `.github/workflows/deploy-pr.yaml` to your
|
||||
runner label
|
||||
3) Add variables to GitHub in repository settings:
|
||||
- `PROD_DOMAIN` - base domain for deployments (e.g. ltrk.cz)
|
||||
- `DEV_FRONTEND_BASE_DOMAIN` - base domain for your cloudflare pages
|
||||
4) Add secrets to GitHub in repository settings:
|
||||
- CLOUDFLARE_ACCOUNT_ID - same as in tofu/terraform.tfvars
|
||||
- CLOUDFLARE_API_TOKEN - same as in tofu/terraform.tfvars
|
||||
- DOCKER_USER - your docker registry username
|
||||
- DOCKER_PASSWORD - your docker registry password
|
||||
- KUBE_CONFIG - content of your kubeconfig file for the cluster
|
||||
- PROD_DB_PASSWORD - same as MARIADB_PASSWORD
|
||||
- PROD_RABBITMQ_PASSWORD - same as `rabbitmq-password` in tofu/terraform.tfvars
|
||||
- PROD_DB_ENCRYPTION_KEY - same as DB_ENCRYPTION_KEY
|
||||
- MOJEID_CLIENT_ID
|
||||
- MOJEID_CLIENT_SECRET
|
||||
- BANKID_CLIENT_ID
|
||||
- BANKID_CLIENT_SECRET
|
||||
- CSAS_CLIENT_ID
|
||||
- CSAS_CLIENT_SECRET
|
||||
- SENTRY_DSN
|
||||
- SMTP_HOST
|
||||
- SMTP_PORT
|
||||
- SMTP_USERNAME
|
||||
- SMTP_PASSWORD
|
||||
- SMTP_FROM
|
||||
- UNIRATE_API_KEY
|
||||
5) On Github open Actions tab, select "Deploy Prod" and run workflow manually
|
||||
|
||||
## Testing Instructions
|
||||
The tests are located in 7project/backend/tests directory
|
||||
If you want to test locally, you have to have the DB running locally as well (start the docker compose in /backend).
|
||||
|
||||
The tests are located in 7project/backend/tests directory. All tests are run by GitHub actions on every pull request and
|
||||
push to main.
|
||||
See the workflow [here](../.github/workflows/run-tests.yml).
|
||||
|
||||
If you want to run the tests locally, the preferred way is to use a [bash script](src/backend/test_locally.sh)
|
||||
that will start a test DB container with [docker compose](src/backend/docker-compose.test.yml) and remove it afterwards.
|
||||
```bash
|
||||
cd backend
|
||||
cd 7project/backend
|
||||
bash test_locally.sh
|
||||
```
|
||||
|
||||
### Unit Tests
|
||||
There are only 3 basic unit tests, since our services logic is very simple
|
||||
|
||||
There are 5 basic unit tests, since our services logic is very simple
|
||||
|
||||
```bash
|
||||
pytest tests/test_unit_user_service.py
|
||||
bash test_locally.sh --only-unit
|
||||
```
|
||||
|
||||
### Integration Tests
|
||||
There are 11 basic unit tests, testing the individual backend API logic
|
||||
|
||||
There are 9 basic integration tests, testing the individual backend API logic
|
||||
|
||||
```bash
|
||||
pytest tests/test_integration_app.py
|
||||
bash test_locally.sh --only-integration
|
||||
```
|
||||
|
||||
### End-to-End Tests
|
||||
There are 7 e2e tests testing more complex app logic
|
||||
|
||||
There are 7 e2e tests, testing more complex app logic
|
||||
|
||||
```bash
|
||||
pytest tests/test_e2e.py
|
||||
bash test_locally.sh --only-e2e
|
||||
```
|
||||
|
||||
## Usage Examples
|
||||
@@ -361,14 +535,14 @@ curl -H "Authorization: Bearer $TOKEN" http://127.0.0.1:8000/authenticated-route
|
||||
> Link to the specific commit on GitHub for each contribution.
|
||||
|
||||
| Task/Component | Assigned To | Status | Time Spent | Difficulty | Notes |
|
||||
|-----------------------------------------------------------------------|-------------| ------------- |------------|------------| ----------- |
|
||||
|-------------------------------------------------------------------------------------------------------------------|-------------|----------------|------------|------------|-----------------------------------------------------------------------------------------------------|
|
||||
| [Project Setup & Repository](https://github.com/dat515-2025/Group-8#) | Lukas | ✅ Complete | [X hours] | Medium | [Any notes] |
|
||||
| [Design Document](https://github.com/dat515-2025/Group-8/blob/main/6design/design.md) | Both | ✅ Complete | 4 Hours | Easy | [Any notes] |
|
||||
| [Backend API Development](https://github.com/dat515-2025/Group-8/tree/main/7project/backend/app/api) | Dejan | ✅ Complete | 12 hours | Medium | [Any notes] |
|
||||
| [Database Setup & Models](https://github.com/dat515-2025/Group-8/tree/main/7project/backend/app/models) | Lukas | 🔄 In Progress | [X hours] | Medium | [Any notes] |
|
||||
| [Database Setup & Models](https://github.com/dat515-2025/Group-8/tree/main/7project/backend/app/models) | Lukas | ✅ Complete | [X hours] | Medium | [Any notes] |
|
||||
| [Frontend Development](https://github.com/dat515-2025/Group-8/tree/main/7project/frontend) | Dejan | ✅ Complete | 17 hours | Medium | [Any notes] |
|
||||
| [Docker Configuration](https://github.com/dat515-2025/Group-8/blob/main/7project/compose.yml) | Lukas | ✅ Complete | [X hours] | Easy | [Any notes] |
|
||||
| [Cloud Deployment](https://github.com/dat515-2025/Group-8/blob/main/7project/deployment/app-demo-deployment.yaml) | Lukas | ✅ Complete | [X hours] | Hard | [Any notes] |
|
||||
| [Docker Configuration](https://github.com/dat515-2025/Group-8/blob/main/7project/compose.yml) | Lukas | ✅ Complete | 3 hours | Easy | [Any notes] |
|
||||
| [Cloud Deployment](https://github.com/dat515-2025/Group-8/blob/main/7project/deployment/app-demo-deployment.yaml) | Lukas | ✅ Complete | [X hours] | Hard | Using Talos cluster running in proxmox - easy snapshots etc. Frontend deployed at Cloudflare pages. |
|
||||
| [Testing Implementation](https://github.com/dat515-2025/group-name) | Dejan | ✅ Complete | 16 hours | Medium | [Any notes] |
|
||||
| [Documentation](https://github.com/dat515-2025/group-name) | Both | 🔄 In Progress | [X hours] | Easy | [Any notes] |
|
||||
| [Presentation Video](https://github.com/dat515-2025/group-name) | Both | ❌ Not Started | [X hours] | Medium | [Any notes] |
|
||||
@@ -381,30 +555,46 @@ curl -H "Authorization: Bearer $TOKEN" http://127.0.0.1:8000/authenticated-route
|
||||
|
||||
### [Lukáš]
|
||||
|
||||
| Date | Activity | Hours | Description |
|
||||
|----------------|---------------------|------------|----------------------------------------------------|
|
||||
| 4.10 to 10.10 | Initial Setup | 40 | Repository setup, project structure, cluster setup |
|
||||
| 14.10 to 16.10 | Backend Development | 12 | Implemented user authentication - oauth |
|
||||
| 8.10 to 12.10 | CI/CD | 10 | Created database schema and models |
|
||||
| [Date] | Testing | [X.X] | Unit tests for API endpoints |
|
||||
| [Date] | Documentation | [X.X] | Updated README and design doc |
|
||||
| **Total** | | **[XX.X]** | |
|
||||
## Hour Sheet
|
||||
|
||||
**Name:** Lukáš Trkan
|
||||
|
||||
| Date | Activity | Hours | Description | Representative Commit / PR |
|
||||
|:----------------|:----------------------------|:--------|:------------------------------------------------------------------------------------|:------------------------------------------------------|
|
||||
| 18.9. - 19.9. | Initial Setup & Design | 40 | Repository init, system design diagrams, basic Terraform setup | `feat(infrastructure): add basic terraform resources` |
|
||||
| 20.9. - 5.10. | Core Infrastructure & CI/CD | 12 | K8s setup (ArgoCD), CI/CD workflows, RabbitMQ, Redis, Celery workers, DB migrations | `PR #2`, `feat(infrastructure): add rabbitmq cluster` |
|
||||
| 6.10. - 9.10. | Frontend Infra & DB | 5 | Deployed frontend to Cloudflare, setup metrics, created database models | `PR #16` (Cloudflare), `PR #19` (DB structure) |
|
||||
| 10.10. - 11.10. | Backend | 5 | Implemented OAuth support (MojeID, BankID) | `feat(auth): add support for OAuth and MojeID` |
|
||||
| 12.10. | Infrastructure | 2 | Added database backups | `feat(infrastructure): add backups` |
|
||||
| 16.10. | Infrastructure | 4 | Implemented secrets management, fixed deployment/env variables | `PR #29` (Deployment envs) |
|
||||
| 17.10. | Monitoring | 1 | Added Sentry logging | `feat(app): add sentry loging` |
|
||||
| 21.10. - 22.10. | Backend | 8 | Added ČSAS bank connection | `PR #32` (Fix React OAuth) |
|
||||
| 29.10. - 30.10. | Backend | 5 | Implemented transaction encryption, add bank scraping | `PR #39` (CSAS Scraping) |
|
||||
| 30.10. | Monitoring | 6 | Implemented Loki logging and basic Prometheus metrics | `PR #42` (Prometheus metrics) |
|
||||
| 9.11. | Monitoring | 2 | Added custom Prometheus metrics | `PR #46` (Prometheus custom metrics) |
|
||||
| 11.11. | Tests | 1 | Investigated and fixed broken Pytest environment | `fix(tests): set pytest env` |
|
||||
| 11.11. - 12.11. | Features & Deployment | 6 | Added cron support, email sender service, updated workers & image | `PR #49` (Email), `PR #50` (Update workers) |
|
||||
| 18.9 - 14.11 | Documentation | 8 | Updated report.md, design docs, and tfvars.example | `Create design.md`, `update report` |
|
||||
| **Total** | | **105** | | |
|
||||
|
||||
### Dejan
|
||||
|
||||
| Date | Activity | Hours | Description |
|
||||
|-----------------|----------------------|--------|---------------------------------------------------------------|
|
||||
| 25.9. | Design | 2 | 6design |
|
||||
| 9.10 to 11.10. | Backend APIs | 12 | Implemented Backend APIs |
|
||||
| 13.10 to 15.10. | Frontend Development | 8 | Created user interface mockups |
|
||||
| Continually | Documentation | 6 | Documenting the dev process |
|
||||
| 21.10 to 23.10 | Tests, frontend | 10 | Test basics, balance charts, and frontend improvement |
|
||||
| 28.10 to 30.10 | CI | 6 | Integrated tests with test database setup on github workflows |
|
||||
| 28.10 to 30.10 | Frontend | 7 | UI improvements and exchange rate API integration |
|
||||
| 4.11 to 6.11 | Tests | 6 | Test fixes improvement, more integration and e2e |
|
||||
| 4.11 to 6.11 | Frontend | 6 | Fixes, Improved UI, added support for mobile devices |
|
||||
| **Total** | | **63** | |
|
||||
|
||||
| Date | Activity | Hours | Description | Representative Commit / PR |
|
||||
|:----------------|:-------------------------|:-------|:--------------------------------------------------------------|:---------------------------------------------------------|
|
||||
| 25.9. | Design | 2 | 6design | |
|
||||
| 9.10 to 11.10. | Backend APIs | 14 | Implemented Backend APIs | `PR #26`, `20-create-a-controller-layer-on-backend-side` |
|
||||
| 13.10 to 15.10. | Frontend Development | 8 | Created user interface mockups | `PR #28`, `frontend basics` |
|
||||
| Continually | Documentation | 8 | Documenting the dev process | |
|
||||
| 21.10 to 23.10 | Tests, frontend | 10 | Test basics, balance charts, and frontend improvement | `PR #31`, `30 create tests and set up a GitHub pipeline` |
|
||||
| 28.10 to 30.10 | CI | 6 | Integrated tests with test database setup on github workflows | `PR #28`, `frontend basics` |
|
||||
| 28.10 to 30.10 | Frontend | 8 | UI improvements and exchange rate API integration | `PR #28`, `frontend basics` |
|
||||
| 4.11 to 6.11 | Tests | 6 | Test fixes improvement, more integration and e2e | `PR #28`, `frontend basics` |
|
||||
| 4.11 to 6.11 | Frontend | 6 | Fixes, Improved UI, added support for mobile devices | `PR #28`, `frontend basics` |
|
||||
| 11.11 | Backend APIs | 4 | Moved rates API, mock bank to Backend, few fixes | `PR #28`, `frontend basics` |
|
||||
| 11.11 to 12.11 | Tests | 3 | Local testing DB container, few fixes | `PR #28`, `frontend basics` |
|
||||
| 12.11 | Frontend | 3 | Enabled multiple transaction edits at once, CSAS button state | `PR #28`, `frontend basics` |
|
||||
| 13.11 | Video | 3 | Video | |
|
||||
| **Total** | | **81** | | |
|
||||
|
||||
### Group Total: [XXX.X] hours
|
||||
|
||||
@@ -418,19 +608,46 @@ curl -H "Authorization: Bearer $TOKEN" http://127.0.0.1:8000/authenticated-route
|
||||
|
||||
### Challenges Faced
|
||||
|
||||
[Describe the main challenges and how you overcame them]
|
||||
#### Slow cluster performance
|
||||
|
||||
This was caused by single SATA SSD disk running all VMs. This was solved by adding second NVMe disk just for Talos VMs.
|
||||
|
||||
#### Stuck IaC deployment
|
||||
|
||||
If the deployed module (a Helm chart, for example) was not configured properly, it would get stuck and time out, resulting in a
|
||||
namespace that cannot be deleted.
|
||||
This was solved by using snapshots in Proxmox and restoring if this happened.
|
||||
|
||||
### If We Did This Again
|
||||
|
||||
#### Different framework
|
||||
|
||||
FastAPI lacks usable built-in support for database migrations, and implementing Alembic was a bit tricky.
|
||||
Integrating the FastAPI auth system with the React frontend was also tricky, since there is no official project template.
|
||||
Using .NET (which we considered initially) would probably solve these issues.
|
||||
|
||||
[What would you do differently? What worked well that you'd keep?]
|
||||
|
||||
### Individual Growth
|
||||
|
||||
#### [Team Member 1 Name]
|
||||
#### [Lukas]
|
||||
|
||||
This course finally forced me to learn Kubernetes (it has been on my TODO list for at least 3 years).
|
||||
I had some prior experience with Terraform/OpenTofu from work, but this improved my understanding of it.
|
||||
|
||||
The biggest challenge for me was time tracking since I am used to tracking to projects, not to tasks.
|
||||
(I am bad even at that :) ).
|
||||
|
||||
It was also an interesting experience to be the one responsible for the initial project structure/design/setup
|
||||
used not only by myself.
|
||||
|
||||
[Personal reflection on growth, challenges, and learning]
|
||||
|
||||
#### [Team Member 2 Name]
|
||||
#### [Dejan]
|
||||
Since I do not have a job, this project was probably the most complex one I have ever worked on.
|
||||
It was also the first school project where I was encouraged to use AI.
|
||||
|
||||
Lukas
|
||||
|
||||
[Personal reflection on growth, challenges, and learning]
|
||||
|
||||
@@ -438,4 +655,4 @@ curl -H "Authorization: Bearer $TOKEN" http://127.0.0.1:8000/authenticated-route
|
||||
---
|
||||
|
||||
**Report Completion Date**: [Date]
|
||||
**Last Updated**: 15.10.2025
|
||||
**Last Updated**: 13.11.2025
|
||||
8
7project/src/backend/.idea/.gitignore
generated
vendored
Normal file
8
7project/src/backend/.idea/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
# Default ignored files
|
||||
/shelf/
|
||||
/workspace.xml
|
||||
# Editor-based HTTP Client requests
|
||||
/httpRequests/
|
||||
# Datasource local storage ignored files
|
||||
/dataSources/
|
||||
/dataSources.local.xml
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM python:3.11-slim
|
||||
FROM python:3.11-trixie
|
||||
|
||||
WORKDIR /app
|
||||
COPY requirements.txt .
|
||||
@@ -1,10 +1,11 @@
|
||||
import uuid
|
||||
from typing import Optional
|
||||
from typing import Optional, Dict, Any
|
||||
from fastapi_users import schemas
|
||||
|
||||
class UserRead(schemas.BaseUser[uuid.UUID]):
|
||||
first_name: Optional[str] = None
|
||||
last_name: Optional[str] = None
|
||||
config: Optional[Dict[str, Any]] = None
|
||||
|
||||
class UserCreate(schemas.BaseUserCreate):
|
||||
first_name: Optional[str] = None
|
||||
@@ -1,5 +1,6 @@
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from os.path import dirname, join
|
||||
from time import strptime
|
||||
from uuid import UUID
|
||||
@@ -55,7 +56,7 @@ def _load_mock_bank_transactions(user_id: UUID) -> None:
|
||||
|
||||
transactions = []
|
||||
with httpx.Client() as client:
|
||||
response = client.get("http://127.0.0.1:8000/mock-bank/scrape")
|
||||
response = client.get(f"{os.getenv('APP_POD_URL')}/mock-bank/scrape")
|
||||
if response.status_code != httpx.codes.OK:
|
||||
return
|
||||
for transaction in response.json():
|
||||
20
7project/src/backend/docker-compose.test.yml
Normal file
20
7project/src/backend/docker-compose.test.yml
Normal file
@@ -0,0 +1,20 @@
|
||||
version: "3.9"
|
||||
services:
|
||||
mariadb:
|
||||
image: mariadb:11.4
|
||||
container_name: test-mariadb
|
||||
environment:
|
||||
MARIADB_ROOT_PASSWORD: rootpw
|
||||
MARIADB_DATABASE: group_project
|
||||
MARIADB_USER: appuser
|
||||
MARIADB_PASSWORD: apppass
|
||||
ports:
|
||||
- "3307:3306" # host:container (use 3307 on host to avoid conflicts)
|
||||
healthcheck:
|
||||
test: ["CMD", "mariadb-admin", "ping", "-h", "127.0.0.1", "-u", "root", "-prootpw", "--silent"]
|
||||
interval: 5s
|
||||
timeout: 2s
|
||||
retries: 20
|
||||
# Truly ephemeral, fast storage (removed when container stops)
|
||||
tmpfs:
|
||||
- /var/lib/mysql
|
||||
113
7project/src/backend/test_locally.sh
Executable file
113
7project/src/backend/test_locally.sh
Executable file
@@ -0,0 +1,113 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Run tests against a disposable local MariaDB on host port 3307 using Docker Compose.
|
||||
# Requirements: Docker, docker compose plugin, Python, Alembic, pytest.
|
||||
# Usage:
|
||||
# chmod +x ./test_locally.sh
|
||||
# # From 7project/backend directory
|
||||
# ./test_locally.sh [--only-unit|--only-integration|--only-e2e] [pytest-args...]
|
||||
# # Examples:
|
||||
# ./test_locally.sh --only-unit -q
|
||||
# ./test_locally.sh --only-integration -k "login"
|
||||
# ./test_locally.sh --only-e2e -vv
|
||||
#
|
||||
# This script will:
|
||||
# 1) Start a MariaDB 11.4 container (ephemeral storage, port 3307)
|
||||
# 2) Wait until it's healthy
|
||||
# 3) Export env vars expected by the app (DATABASE_URL etc.)
|
||||
# 4) Run Alembic migrations
|
||||
# 5) Run pytest
|
||||
# 6) Tear everything down (containers and tmpfs data)
|
||||
|
||||
COMPOSE_FILE="docker-compose.test.yml"
|
||||
SERVICE_NAME="mariadb"
|
||||
CONTAINER_NAME="test-mariadb"
|
||||
|
||||
if ! command -v docker >/dev/null 2>&1; then
|
||||
echo "Docker is required but not found in PATH" >&2
|
||||
exit 1
|
||||
fi
|
||||
if ! docker compose version >/dev/null 2>&1; then
|
||||
echo "Docker Compose V2 plugin is required (docker compose)" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Bring up the DB
|
||||
echo "Starting MariaDB (port 3307) with docker compose..."
|
||||
docker compose -f "$COMPOSE_FILE" up -d
|
||||
|
||||
# Ensure we clean up on exit
|
||||
cleanup() {
|
||||
echo "\nTearing down docker compose stack..."
|
||||
docker compose -f "$COMPOSE_FILE" down -v || true
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
# Wait for healthy container
|
||||
echo -n "Waiting for MariaDB to become healthy"
|
||||
for i in {1..60}; do
|
||||
status=$(docker inspect -f '{{.State.Health.Status}}' "$CONTAINER_NAME" 2>/dev/null || echo "")
|
||||
if [ "$status" = "healthy" ]; then
|
||||
echo " -> healthy"
|
||||
break
|
||||
fi
|
||||
echo -n "."
|
||||
sleep 1
|
||||
if [ $i -eq 60 ]; then
|
||||
echo "\nMariaDB did not become healthy in time" >&2
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
# Export env vars for the app/tests (match app/core/db.py expectations)
|
||||
export MARIADB_HOST=127.0.0.1
|
||||
export MARIADB_PORT=3307
|
||||
export MARIADB_DB=group_project
|
||||
export MARIADB_USER=appuser
|
||||
export MARIADB_PASSWORD=apppass
|
||||
export DATABASE_URL="mysql+asyncmy://$MARIADB_USER:$MARIADB_PASSWORD@$MARIADB_HOST:$MARIADB_PORT/$MARIADB_DB"
|
||||
export PYTEST_RUN_CONFIG="True"
|
||||
|
||||
# Determine which tests to run based on flags
|
||||
UNIT_TESTS="tests/test_unit_user_service.py"
|
||||
INTEGRATION_TESTS="tests/test_integration_app.py"
|
||||
E2E_TESTS="tests/test_e2e.py"
|
||||
|
||||
FLAG_COUNT=0
|
||||
TEST_TARGET=""
|
||||
declare -a PYTEST_ARGS=()
|
||||
for arg in "$@"; do
|
||||
case "$arg" in
|
||||
--only-unit)
|
||||
TEST_TARGET="$UNIT_TESTS"; FLAG_COUNT=$((FLAG_COUNT+1));;
|
||||
--only-integration)
|
||||
TEST_TARGET="$INTEGRATION_TESTS"; FLAG_COUNT=$((FLAG_COUNT+1));;
|
||||
--only-e2e)
|
||||
TEST_TARGET="$E2E_TESTS"; FLAG_COUNT=$((FLAG_COUNT+1));;
|
||||
*)
|
||||
PYTEST_ARGS+=("$arg");;
|
||||
esac
|
||||
done
|
||||
|
||||
if [ "$FLAG_COUNT" -gt 1 ]; then
|
||||
echo "Error: Use only one of --only-unit, --only-integration, or --only-e2e" >&2
|
||||
exit 2
|
||||
fi
|
||||
|
||||
# Run Alembic migrations then tests
|
||||
pushd . >/dev/null
|
||||
echo "Running Alembic migrations..."
|
||||
alembic upgrade head
|
||||
|
||||
echo "Running pytest..."
|
||||
if [ -n "$TEST_TARGET" ]; then
|
||||
# Use "${PYTEST_ARGS[@]:-}" to safely expand empty array with 'set -u'
|
||||
pytest "$TEST_TARGET" "${PYTEST_ARGS[@]:-}"
|
||||
else
|
||||
# Use "${PYTEST_ARGS[@]:-}" to safely expand empty array with 'set -u'
|
||||
pytest "${PYTEST_ARGS[@]:-}"
|
||||
fi
|
||||
popd >/dev/null
|
||||
|
||||
# Cleanup handled by trap
|
||||
@@ -3,17 +3,6 @@ import pytest
|
||||
from httpx import AsyncClient, ASGITransport
|
||||
|
||||
|
||||
def test_root_ok(client):
|
||||
resp = client.get("/")
|
||||
assert resp.status_code == status.HTTP_200_OK
|
||||
assert resp.json() == {"status": "ok"}
|
||||
|
||||
|
||||
def test_authenticated_route_requires_auth(client):
|
||||
resp = client.get("/authenticated-route")
|
||||
assert resp.status_code in (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_create_and_get_category(fastapi_app, test_user):
|
||||
# Use AsyncClient for async tests
|
||||
@@ -165,6 +154,6 @@ async def test_delete_transaction_not_found(fastapi_app, test_user):
|
||||
async with AsyncClient(transport=transport, base_url="http://testserver") as ac:
|
||||
token = (await ac.post("/auth/jwt/login", data=test_user)).json()["access_token"]
|
||||
h = {"Authorization": f"Bearer {token}"}
|
||||
r = await ac.delete("/transactions/999999/delete", headers=h)
|
||||
r = await ac.delete("/transactions/9999999/delete", headers=h)
|
||||
assert r.status_code == status.HTTP_404_NOT_FOUND
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
import types
|
||||
import asyncio
|
||||
import pytest
|
||||
|
||||
from fastapi import status
|
||||
from app.services import user_service
|
||||
|
||||
|
||||
@@ -22,6 +20,15 @@ def test_get_jwt_strategy_lifetime():
|
||||
# Basic smoke check: strategy has a lifetime set to 604800
|
||||
assert getattr(strategy, "lifetime_seconds", None) in (604800,)
|
||||
|
||||
def test_root_ok(client):
|
||||
resp = client.get("/")
|
||||
assert resp.status_code == status.HTTP_200_OK
|
||||
assert resp.json() == {"status": "ok"}
|
||||
|
||||
|
||||
def test_authenticated_route_requires_auth(client):
|
||||
resp = client.get("/authenticated-route")
|
||||
assert resp.status_code in (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN)
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_on_after_request_verify_enqueues_email(monkeypatch):
|
||||
@@ -120,3 +120,5 @@ spec:
|
||||
secretKeyRef:
|
||||
name: prod
|
||||
key: SMTP_FROM
|
||||
- name: APP_POD_URL
|
||||
value: {{ printf "http://%s.%s.svc.cluster.local" .Values.app.name .Release.Namespace | quote }}
|
||||
|
Before Width: | Height: | Size: 1.5 KiB After Width: | Height: | Size: 1.5 KiB |
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user