Compare commits

19 Commits

Author SHA1 Message Date
31add42d6d update report 2025-11-13 11:13:11 +01:00
4de79169a2 update report 2025-11-13 11:11:16 +01:00
59d53967b0 update report 2025-11-13 01:35:13 +01:00
f3086f8c73 update report, edit deployment, update tfvars.example 2025-11-13 00:04:31 +01:00
ribardej fd437b1caf feat(frontend): implemented CSAS button responsiveness 2025-11-12 20:21:31 +01:00
96ebc27001 updates 2025-11-12 17:34:50 +01:00
ribardej 922651fdbf fix(frontend): implemented CSAS button responsiveness 2025-11-12 15:37:53 +01:00
ribardej e164b185e0 feat(frontend): implemented CSAS button responsiveness 2025-11-12 15:31:30 +01:00
ribardej 186b4fd09a fix(frontend): implemented multiple transaction selections in UI 2025-11-12 15:21:08 +01:00
ribardej 280d495335 feat(frontend): implemented multiple transaction selections in UI 2025-11-12 15:10:00 +01:00
ribardej e73233c90a feat(docs): report.md update and refactored tests 2025-11-12 14:42:04 +01:00
ribardej aade78bf3f feat(docs): report.md update and added options to test-with-ephemeral-mariadb.sh 2025-11-12 14:12:04 +01:00
ribardej 50e489a8e0 feat(tests): implemented local test DB container for isolation 2025-11-12 13:29:20 +01:00
ribardej 1679abb71f feat(tests): implemented local test DB container for isolation 2025-11-12 13:29:09 +01:00
573404dead feat(infrastructure): use correct url 2025-11-12 01:11:53 +01:00
d57dd82a64 feat(infrastructure): use correct url 2025-11-12 01:09:29 +01:00
50f37c1161 feat(infrastructure): use newer image 2025-11-12 00:58:54 +01:00
ae22d2ee5f feat(infrastructure): make tests mandatory 2025-11-12 00:46:36 +01:00
509608f8c9 Merge pull request #50 from dat515-2025/merge/update_workers (feat(workers): update workers) 2025-11-12 00:42:16 +01:00
22 changed files with 685 additions and 265 deletions

View File

@@ -33,7 +33,7 @@ jobs:
     runner: vhs
     mode: pr
     pr_number: ${{ github.event.pull_request.number }}
-    base_domain: ${{ vars.DEV_BASE_DOMAIN }}
+    base_domain: ${{ vars.PROD_DOMAIN }}
   secrets: inherit
 frontend:
@@ -77,7 +77,7 @@ jobs:
 - name: Helm upgrade/install PR preview
   env:
-    DEV_BASE_DOMAIN: ${{ secrets.BASE_DOMAIN }}
+    DEV_BASE_DOMAIN: ${{ vars.BASE_DOMAIN }}
     RABBITMQ_PASSWORD: ${{ secrets.PROD_RABBITMQ_PASSWORD }}
     DB_PASSWORD: ${{ secrets.PROD_DB_PASSWORD }}
     DIGEST: ${{ needs.build.outputs.digest }}

View File

@@ -27,6 +27,7 @@ jobs:
 build:
   name: Build and push image (reusable)
+  needs: [test]
   uses: ./.github/workflows/build-image.yaml
   with:
     mode: prod
@@ -36,6 +37,7 @@ jobs:
 get_urls:
   name: Generate Production URLs
+  needs: [test]
   uses: ./.github/workflows/url_generator.yml
   with:
     mode: prod

View File

@@ -1,4 +1,4 @@
-FROM python:3.11-slim
+FROM python:3.11-trixie
 WORKDIR /app
 COPY requirements.txt .

View File

@@ -1,10 +1,11 @@
 import uuid
-from typing import Optional
+from typing import Optional, Dict, Any
 from fastapi_users import schemas


 class UserRead(schemas.BaseUser[uuid.UUID]):
     first_name: Optional[str] = None
     last_name: Optional[str] = None
+    config: Optional[Dict[str, Any]] = None


 class UserCreate(schemas.BaseUserCreate):
     first_name: Optional[str] = None
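For illustration, a hedged sketch of what the new optional `config` field might hold once a bank connection is stored; the nested `csas`/`expires_at` keys mirror what the frontend diff further down reads, but the exact payload shape is an assumption, not a documented contract:

```python
from uuid import uuid4

# Hypothetical example built on the UserRead schema above; the "csas" payload
# with a unix-timestamp "expires_at" is an assumption mirroring the frontend.
user = UserRead(
    id=uuid4(),
    email="user@example.com",
    first_name="Ada",
    config={"csas": {"expires_at": 1761824615}},
)
print(user.config["csas"]["expires_at"])
```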

View File

@@ -1,5 +1,6 @@
 import json
 import logging
+import os
 from os.path import dirname, join
 from time import strptime
 from uuid import UUID
@@ -55,7 +56,7 @@ def _load_mock_bank_transactions(user_id: UUID) -> None:
     transactions = []
     with httpx.Client() as client:
-        response = client.get("http://127.0.0.1:8000/mock-bank/scrape")
+        response = client.get(f"{os.getenv('APP_POD_URL')}/mock-bank/scrape")
         if response.status_code != httpx.codes.OK:
             return
         for transaction in response.json():
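One subtlety of this change: `os.getenv` returns `None` when `APP_POD_URL` is unset (for example when running outside the cluster), so the f-string would produce a `None/mock-bank/scrape` URL. A minimal sketch of a fallback, assuming the previous localhost default is still appropriate for local runs; the helper name is hypothetical:

```python
import os

# Hypothetical helper (not in the repo): prefer the in-cluster URL injected by
# the Helm chart, and fall back to the local dev server when it is unset.
def mock_bank_base_url() -> str:
    return os.getenv("APP_POD_URL", "http://127.0.0.1:8000")
```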

View File

@@ -0,0 +1,20 @@
version: "3.9"
services:
mariadb:
image: mariadb:11.4
container_name: test-mariadb
environment:
MARIADB_ROOT_PASSWORD: rootpw
MARIADB_DATABASE: group_project
MARIADB_USER: appuser
MARIADB_PASSWORD: apppass
ports:
- "3307:3306" # host:container (use 3307 on host to avoid conflicts)
healthcheck:
test: ["CMD", "mariadb-admin", "ping", "-h", "127.0.0.1", "-u", "root", "-prootpw", "--silent"]
interval: 5s
timeout: 2s
retries: 20
# Truly ephemeral, fast storage (removed when container stops)
tmpfs:
- /var/lib/mysql
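As a quick sanity check that the ephemeral database is reachable, a minimal sketch, assuming `sqlalchemy` and `asyncmy` are installed (both implied by the backend's `mysql+asyncmy` connection string), that connects with the same credentials and host port the compose file exposes:

```python
import asyncio

from sqlalchemy import text
from sqlalchemy.ext.asyncio import create_async_engine


async def main() -> None:
    # Credentials and host port 3307 match docker-compose.test.yml above.
    engine = create_async_engine(
        "mysql+asyncmy://appuser:apppass@127.0.0.1:3307/group_project"
    )
    async with engine.connect() as conn:
        result = await conn.execute(text("SELECT 1"))
        print(result.scalar_one())  # expect 1 once the container is healthy
    await engine.dispose()


asyncio.run(main())
```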

View File

@@ -0,0 +1,113 @@
#!/usr/bin/env bash
set -euo pipefail
# Run tests against a disposable local MariaDB on host port 3307 using Docker Compose.
# Requirements: Docker, docker compose plugin, Python, Alembic, pytest.
# Usage:
# chmod +x ./test-with-ephemeral-mariadb.sh
# # From 7project/backend directory
# ./test-with-ephemeral-mariadb.sh [--only-unit|--only-integration|--only-e2e] [pytest-args...]
# # Examples:
# ./test-with-ephemeral-mariadb.sh --only-unit -q
# ./test-with-ephemeral-mariadb.sh --only-integration -k "login"
# ./test-with-ephemeral-mariadb.sh --only-e2e -vv
#
# This script will:
# 1) Start a MariaDB 11.4 container (ephemeral storage, port 3307)
# 2) Wait until it's healthy
# 3) Export env vars expected by the app (DATABASE_URL etc.)
# 4) Run Alembic migrations
# 5) Run pytest
# 6) Tear everything down (containers and tmpfs data)
COMPOSE_FILE="docker-compose.test.yml"
SERVICE_NAME="mariadb"
CONTAINER_NAME="test-mariadb"
if ! command -v docker >/dev/null 2>&1; then
echo "Docker is required but not found in PATH" >&2
exit 1
fi
if ! docker compose version >/dev/null 2>&1; then
echo "Docker Compose V2 plugin is required (docker compose)" >&2
exit 1
fi
# Bring up the DB
echo "Starting MariaDB (port 3307) with docker compose..."
docker compose -f "$COMPOSE_FILE" up -d
# Ensure we clean up on exit
cleanup() {
echo "\nTearing down docker compose stack..."
docker compose -f "$COMPOSE_FILE" down -v || true
}
trap cleanup EXIT
# Wait for healthy container
echo -n "Waiting for MariaDB to become healthy"
for i in {1..60}; do
status=$(docker inspect -f '{{.State.Health.Status}}' "$CONTAINER_NAME" 2>/dev/null || echo "")
if [ "$status" = "healthy" ]; then
echo " -> healthy"
break
fi
echo -n "."
sleep 1
if [ $i -eq 60 ]; then
echo "\nMariaDB did not become healthy in time" >&2
exit 1
fi
done
# Export env vars for the app/tests (match app/core/db.py expectations)
export MARIADB_HOST=127.0.0.1
export MARIADB_PORT=3307
export MARIADB_DB=group_project
export MARIADB_USER=appuser
export MARIADB_PASSWORD=apppass
export DATABASE_URL="mysql+asyncmy://$MARIADB_USER:$MARIADB_PASSWORD@$MARIADB_HOST:$MARIADB_PORT/$MARIADB_DB"
export PYTEST_RUN_CONFIG="True"
# Determine which tests to run based on flags
UNIT_TESTS="tests/test_unit_user_service.py"
INTEGRATION_TESTS="tests/test_integration_app.py"
E2E_TESTS="tests/test_e2e.py"
FLAG_COUNT=0
TEST_TARGET=""
declare -a PYTEST_ARGS=()
for arg in "$@"; do
case "$arg" in
--only-unit)
TEST_TARGET="$UNIT_TESTS"; FLAG_COUNT=$((FLAG_COUNT+1));;
--only-integration)
TEST_TARGET="$INTEGRATION_TESTS"; FLAG_COUNT=$((FLAG_COUNT+1));;
--only-e2e)
TEST_TARGET="$E2E_TESTS"; FLAG_COUNT=$((FLAG_COUNT+1));;
*)
PYTEST_ARGS+=("$arg");;
esac
done
if [ "$FLAG_COUNT" -gt 1 ]; then
echo "Error: Use only one of --only-unit, --only-integration, or --only-e2e" >&2
exit 2
fi
# Run Alembic migrations then tests
pushd . >/dev/null
echo "Running Alembic migrations..."
alembic upgrade head
echo "Running pytest..."
if [ -n "$TEST_TARGET" ]; then
# Use "${PYTEST_ARGS[@]:-}" to safely expand empty array with 'set -u'
pytest "$TEST_TARGET" "${PYTEST_ARGS[@]:-}"
else
# Use "${PYTEST_ARGS[@]:-}" to safely expand empty array with 'set -u'
pytest "${PYTEST_ARGS[@]:-}"
fi
popd >/dev/null
# Cleanup handled by trap

View File

@@ -3,17 +3,6 @@ import pytest
 from httpx import AsyncClient, ASGITransport

-def test_root_ok(client):
-    resp = client.get("/")
-    assert resp.status_code == status.HTTP_200_OK
-    assert resp.json() == {"status": "ok"}
-
-def test_authenticated_route_requires_auth(client):
-    resp = client.get("/authenticated-route")
-    assert resp.status_code in (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN)

 @pytest.mark.asyncio
 async def test_create_and_get_category(fastapi_app, test_user):
     # Use AsyncClient for async tests
@@ -165,6 +154,6 @@ async def test_delete_transaction_not_found(fastapi_app, test_user):
     async with AsyncClient(transport=transport, base_url="http://testserver") as ac:
         token = (await ac.post("/auth/jwt/login", data=test_user)).json()["access_token"]
         h = {"Authorization": f"Bearer {token}"}
-        r = await ac.delete("/transactions/999999/delete", headers=h)
+        r = await ac.delete("/transactions/9999999/delete", headers=h)
         assert r.status_code == status.HTTP_404_NOT_FOUND

View File

@@ -1,7 +1,5 @@
-import types
-import asyncio
 import pytest
+from fastapi import status

 from app.services import user_service
@@ -22,6 +20,15 @@ def test_get_jwt_strategy_lifetime():
     # Basic smoke check: strategy has a lifetime set to 604800
     assert getattr(strategy, "lifetime_seconds", None) in (604800,)

+def test_root_ok(client):
+    resp = client.get("/")
+    assert resp.status_code == status.HTTP_200_OK
+    assert resp.json() == {"status": "ok"}
+
+def test_authenticated_route_requires_auth(client):
+    resp = client.get("/authenticated-route")
+    assert resp.status_code in (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN)
+
 @pytest.mark.asyncio
 async def test_on_after_request_verify_enqueues_email(monkeypatch):

View File

@@ -120,3 +120,5 @@ spec:
secretKeyRef:
  name: prod
  key: SMTP_FROM
- name: APP_POD_URL
value: {{ printf "http://%s.%s.svc.cluster.local" .Values.app.name .Release.Namespace | quote }}

View File

@@ -133,6 +133,9 @@ export type User = {
  is_active: boolean;
  is_superuser: boolean;
  is_verified: boolean;
// Optional JSON config object for user-level integrations and settings
// Example: { csas: "{\"expires_at\": 1761824615, ...}" } or { csas: { expires_at: 1761824615, ... } }
config?: Record<string, any> | null;
};

export async function getMe(): Promise<User> {

View File

@@ -1,5 +1,2 @@
 export const BACKEND_URL: string =
-  import.meta.env.VITE_BACKEND_URL ?? '';
+  import.meta.env.VITE_BACKEND_URL ?? 'http://127.0.0.1:8000';
-export const VITE_UNIRATE_API_KEY: string =
-  import.meta.env.VITE_UNIRATE_API_KEY ?? 'wYXMiA0bz8AVRHtiS9hbKIr4VP3k5Qff8XnQdKQM45YM3IwFWP6y73r3KMkv1590';

View File

@@ -1,5 +1,5 @@
-import { useEffect, useMemo, useState } from 'react';
+import { useEffect, useMemo, useState, useCallback } from 'react';
-import { type Category, type Transaction, type BalancePoint, deleteTransaction, getCategories, getTransactions, createTransaction, updateTransaction, getBalanceSeries } from '../api';
+import { type Category, type Transaction, type BalancePoint, getMe, deleteTransaction, getCategories, getTransactions, createTransaction, updateTransaction, getBalanceSeries } from '../api';
import AccountPage from './AccountPage';
import AppearancePage from './AppearancePage';
import BalanceChart from './BalanceChart';
@@ -118,6 +118,47 @@ export default function Dashboard({ onLogout }: { onLogout: () => void }) {
const [isMockModalOpen, setMockModalOpen] = useState(false);
const [isGenerating, setIsGenerating] = useState(false);
// Current user and CSAS connection status
const [csasConnected, setCsasConnected] = useState(false);
useEffect(() => {
(async () => {
try {
const u = await getMe();
// Determine CSAS connection validity
const csas = (u as any)?.config?.csas;
let obj: any = null;
if (csas) {
if (typeof csas === 'string') {
try { obj = JSON.parse(csas); } catch {}
} else if (typeof csas === 'object') {
obj = csas;
}
}
let exp: number | null = null;
const raw = obj?.expires_at;
if (typeof raw === 'number') {
exp = raw;
} else if (typeof raw === 'string') {
const asNum = Number(raw);
if (!Number.isNaN(asNum)) {
exp = asNum;
} else {
const ms = Date.parse(raw);
if (!Number.isNaN(ms)) exp = Math.floor(ms / 1000);
}
}
if (exp && exp > Math.floor(Date.now() / 1000)) {
setCsasConnected(true);
} else {
setCsasConnected(false);
}
} catch (e) {
// ignore, user may not be loaded; keep button enabled
}
})();
}, []);
// Start CSAS (George) OAuth after login
async function startOauthCsas() {
  const base = BACKEND_URL.replace(/\/$/, '');
@@ -168,7 +209,14 @@ export default function Dashboard({ onLogout }: { onLogout: () => void }) {
// Sidebar toggle for mobile
const [sidebarOpen, setSidebarOpen] = useState(false);
// Multi-select state for transactions and bulk category assignment
const [selectedTxIds, setSelectedTxIds] = useState<number[]>([]);
const [bulkCategoryIds, setBulkCategoryIds] = useState<number[]>([]);
const toggleSelectTx = useCallback((id: number) => {
setSelectedTxIds(prev => prev.includes(id) ? prev.filter(x => x !== id) : [...prev, id]);
}, []);
const clearSelection = useCallback(() => setSelectedTxIds([]), []);
const selectAllVisible = useCallback((ids: number[]) => setSelectedTxIds(ids), []);
async function loadAll() {
  setLoading(true);
@@ -241,7 +289,7 @@ export default function Dashboard({ onLogout }: { onLogout: () => void }) {
  }
}

-useEffect(() => { loadAll(); }, [startDate, endDate]);
+useEffect(() => { loadAll(); clearSelection(); }, [startDate, endDate]);

const filtered = useMemo(() => {
  let arr = [...transactions];
@@ -267,6 +315,9 @@ export default function Dashboard({ onLogout }: { onLogout: () => void }) {
const pageEnd = pageStart + pageSize;
const visible = sortedDesc.slice(pageStart, pageEnd);
// Reset selection when page or filters impacting visible set change
useEffect(() => { clearSelection(); }, [page, minAmount, maxAmount, filterCategoryId, searchText]);
function categoryNameById(id: number) { return categories.find(c => c.id === id)?.name || `#${id}`; }
@@ -354,7 +405,7 @@ export default function Dashboard({ onLogout }: { onLogout: () => void }) {
<h3>Bank connections</h3>
<div className="connection-row">
  <p className="muted" style={{ margin: 0 }}>Connect your CSAS (George) account.</p>
- <button className="btn primary" onClick={startOauthCsas}>Connect CSAS (George)</button>
+ <button className="btn primary" onClick={startOauthCsas} disabled={csasConnected}>{csasConnected ? 'Successfully connected to CSAS' : 'Connect CSAS (George)'}</button>
</div>
<div className="connection-row">
  <p className="muted" style={{ margin: 0 }}>Generate data from a mock bank.</p>
@@ -416,7 +467,55 @@ export default function Dashboard({ onLogout }: { onLogout: () => void }) {
<div className="muted"> <div className="muted">
Showing {visible.length} of {filtered.length} (page {Math.min(page + 1, Math.max(1, totalPages))}/{Math.max(1, totalPages)}) Showing {visible.length} of {filtered.length} (page {Math.min(page + 1, Math.max(1, totalPages))}/{Math.max(1, totalPages)})
</div> </div>
<div className="actions"> <div className="actions" style={{ gap: 8, alignItems: 'center' }}>
{selectedTxIds.length > 0 && (
<>
<span className="muted">Selected: {selectedTxIds.length}</span>
<select
className="input"
multiple
value={bulkCategoryIds.map(String)}
onChange={(e) => {
const ids = Array.from(e.currentTarget.selectedOptions).map(o => Number(o.value));
setBulkCategoryIds(ids);
}}
>
{categories.map(c => (
<option key={c.id} value={c.id}>{c.name}</option>
))}
</select>
<button
className="btn primary"
onClick={async () => {
if (bulkCategoryIds.length === 0) {
alert('Pick at least one category to assign.');
return;
}
try {
// Apply selected categories to each selected transaction, replacing their categories
const updates = await Promise.allSettled(
selectedTxIds.map(id => updateTransaction(id, { category_ids: bulkCategoryIds }))
);
const fulfilled = updates.filter(u => u.status === 'fulfilled') as PromiseFulfilledResult<Transaction>[];
const updatedById = new Map<number, Transaction>(fulfilled.map(f => [f.value.id, f.value]));
setTransactions(prev => prev.map(t => updatedById.get(t.id) || t));
try { setBalanceSeries(await getBalanceSeries(startDate || undefined, endDate || undefined)); } catch {}
if (fulfilled.length !== selectedTxIds.length) {
alert(`Assigned categories to ${fulfilled.length} of ${selectedTxIds.length} selected transactions. Some updates failed.`);
}
} catch (e: any) {
alert(e?.message || 'Failed to assign categories');
} finally {
clearSelection();
setBulkCategoryIds([]);
}
}}
>
Apply categories to selected
</button>
<button className="btn" onClick={clearSelection}>Clear selection</button>
</>
)}
<button className="btn primary" disabled={page <= 0} onClick={() => setPage(p => Math.max(0, p - 1))}>Previous</button> <button className="btn primary" disabled={page <= 0} onClick={() => setPage(p => Math.max(0, p - 1))}>Previous</button>
<button className="btn primary" disabled={page >= totalPages - 1} onClick={() => setPage(p => Math.min(totalPages - 1, p + 1))}>Next</button> <button className="btn primary" disabled={page >= totalPages - 1} onClick={() => setPage(p => Math.min(totalPages - 1, p + 1))}>Next</button>
</div> </div>
@@ -424,6 +523,21 @@ export default function Dashboard({ onLogout }: { onLogout: () => void }) {
<table className="table responsive"> <table className="table responsive">
<thead> <thead>
<tr> <tr>
<th style={{ width: 36 }}>
<input
type="checkbox"
aria-label="Select all on page"
checked={visible.length > 0 && visible.every(v => selectedTxIds.includes(v.id))}
onChange={(e) => {
if (e.currentTarget.checked) {
selectAllVisible(visible.map(v => v.id));
} else {
// remove only currently visible from selection
setSelectedTxIds(prev => prev.filter(id => !visible.some(v => v.id === id)));
}
}}
/>
</th>
<th>Date</th>
<th style={{ textAlign: 'right' }}>Amount</th>
<th>Description</th>
@@ -433,7 +547,15 @@ export default function Dashboard({ onLogout }: { onLogout: () => void }) {
</thead>
<tbody>
  {visible.map(t => (
-   <tr key={t.id}>
+   <tr key={t.id} style={{ backgroundColor: selectedTxIds.includes(t.id) ? 'rgba(88, 136, 255, 0.1)' : undefined }}>
<td>
<input
type="checkbox"
aria-label={`Select transaction ${t.id}`}
checked={selectedTxIds.includes(t.id)}
onChange={() => toggleSelectTx(t.id)}
/>
</td>
{/* Date cell */}
<td data-label="Date">
  {editingTxId === t.id ? (

View File

@@ -1,9 +1,9 @@
# Personal finance tracker

-> **Instructions**:
+<!--- **Instructions**:
> This template provides the structure for your project report.
> Replace the placeholder text with your actual content.
-> Remove instructions that are not relevant for your project, but leave the headings along with a (NA) label.
+> Remove instructions that are not relevant for your project, but leave the headings along with a (NA) label. -->

## Project Overview
@@ -12,210 +12,414 @@
**Group Members**:
- 289229, Lukáš Trkan, lukastrkan
-- 289258, Dejan Ribarovski, derib2613, ribardej
+- 289258, Dejan Ribarovski, ribardej (derib2613)

**Brief Description**:
-Our application is a finance tracker, so a person can easily track his cash flow through multiple bank accounts. Person can label transactions with custom categories and later filter by them.
+Our application allows users to easily track their cash flow through multiple bank accounts. Users can label their transactions with custom categories that can later be used for filtering and visualization. New transactions are automatically fetched in the background.
## Architecture Overview

-Our system is a fullstack web application composed of a React frontend, a FastAPI backend, a PostgreSQL database, and asynchronous background workers powered by Celery with RabbitMQ. Redis is available for caching/kv and may be used by Celery as a result backend. The backend exposes REST endpoints for authentication (email/password and OAuth), users, categories, and transactions. A thin controller layer (FastAPI routers) lives under app/api. Infrastructure for Kubernetes is provided via OpenTofu (Terraform-compatible) modules and the application is packaged via a Helm chart.
+Our system is a fullstack web application composed of a React frontend, a FastAPI backend, a MariaDB database with Maxscale, and asynchronous background workers powered by Celery with RabbitMQ. The backend exposes REST endpoints for authentication (email/password and OAuth), users, categories, transactions, exchange rates and bank APIs. Infrastructure for Kubernetes is managed via Terraform/OpenTofu and the application is packaged via a Helm chart. All of this is deployed on a private TalosOS cluster running on Proxmox VE with CI/CD, with public access over Cloudflare tunnels; static frontend files are served via Cloudflare Pages. Other services deployed in the cluster include Longhorn for persistent storage and Prometheus with Grafana for monitoring.
### High-Level Architecture

```mermaid
flowchart LR
-  proc_queue[Message Queue] --> proc_queue_worker[Worker Service]
-  proc_queue_worker --> ext_mail[(Email Service)]
-  proc_cron[Task planner] --> proc_queue
-  proc_queue_worker --> ext_bank[(Bank API)]
-  proc_queue_worker --> db
-  client[Client/Frontend] <--> svc[Backend API]
-  svc <--> db[(Database)]
+  n3(("User")) <--> client["Frontend"]
+  proc_queue["Message Queue"] --> proc_queue_worker["Worker Service"]
+  proc_queue_worker -- SMTP --> ext_mail[("Email Service")]
+  proc_queue_worker <-- HTTP request/response --> ext_bank[("Bank API")]
+  proc_queue_worker <--> db[("Database")]
+  proc_cron["Cron"] <-- HTTP request/response --> svc["Backend API"]
   svc --> proc_queue
+  n2["Cloudflare tunnel"] <-- HTTP request/response --> svc
+  svc <--> db
+  svc <-- HTTP request/response --> api[("UniRate API")]
+  client <-- HTTP request/response --> n2
```
The workflow works in the following way:

-- Client connects to the frontend. After login, the frontend automatically fetches the stored transactions from the database via the backend API
+- Client connects to the frontend. After login, the frontend automatically fetches the stored transactions from the database via the backend API and currency rates from the UniRate API.
-- When the client opts for fetching new transactions via the Bank API, the backend delegates the task to a background worker service via the Message queue.
+- When the client opts for fetching new transactions via the Bank API, cron triggers periodic fetching using a background worker.
- After successful load, these transactions are stored in the database and displayed to the client
-- There is also a Task planner, that executes periodic tasks, like fetching new transactions automatically from the Bank API

### Features

- The stored transactions are encrypted in the DB for security reasons.
- For every pull request, the full app is deployed on a separate URL and the tests are run by GitHub CI/CD.
- On every push to main, the production app is automatically updated.
- UI is responsive for mobile devices.
- Slow operations (emails, transaction fetching) are handled in the background by Celery workers.
- The app is monitored via a Prometheus metrics endpoint and metrics are shown in a Grafana dashboard.
### Components

- Frontend (frontend/): React + TypeScript app built with Vite. Talks to the backend via REST, handles login/registration, shows latest transactions, filtering, and allows adding transactions.
-- Backend API (backend/app): FastAPI app with routers under app/api for auth, categories, and transactions. Uses FastAPI Users for auth (JWT + OAuth), SQLAlchemy ORM, and Pydantic v2 schemas.
-- Worker service (backend/app/workers): Celery worker handling asynchronous tasks (e.g., sending verification emails, future background processing).
-- Database (PostgreSQL): Persists users, categories, transactions; schema managed by Alembic migrations.
-- Message Queue (RabbitMQ): Transports background jobs from the API to the worker.
-- Cache/Result Store (Redis): Available for caching or Celery result backend.
-- Infrastructure as Code (tofu/): OpenTofu modules provisioning cluster services (RabbitMQ, Redis, Argo CD, cert-manager, Cloudflare tunnel, etc.).
+- Backend API (backend/app): FastAPI app with routers under app/api for auth, users, categories, transactions, exchange rates and the bank API. Uses FastAPI Users for auth (JWT + OAuth), SQLAlchemy ORM, and Pydantic v2 schemas.
+- Worker service (backend/app/workers): Celery worker handling background tasks (emails, transaction fetching).
+- Database (MariaDB with Maxscale): Persists users, categories, transactions; schema managed by Alembic migrations.
+- Message Queue (RabbitMQ): Queues background tasks for Celery workers.
+- Infrastructure as Code (tofu/): OpenTofu modules provisioning cluster services (RabbitMQ, Redis, Cloudflare tunnel, etc.).
- Deployment Chart (charts/myapp-chart/): Helm chart to deploy the application to Kubernetes.

### Technologies Used

- Backend: Python, FastAPI, FastAPI Users, SQLAlchemy, Pydantic, Alembic, Celery
- Frontend: React, TypeScript, Vite
-- Database: MariaDB (Maxscale)
+- Database: MariaDB with Maxscale
- Background jobs: RabbitMQ, Celery
- Containerization/Orchestration: Docker, Docker Compose (dev), Kubernetes, Helm
- IaC/Platform: Proxmox, Talos, Cloudflare pages, OpenTofu (Terraform), cert-manager, MetalLB, Cloudflare Tunnel, Prometheus, Loki
## Prerequisites

### System Requirements

-- Operating System (dev): Linux, macOS, or Windows with Docker support
-- Operating System (prod): Linux with kubernetes
-- Minimum RAM: 4 GB (8 GB recommended for running backend, frontend, and database together)
-- Storage: 4 GB free (Docker images may require additional space)

#### Development

- Minimum RAM: 8 GB
- Storage: 10 GB+ free

#### Production

- 1 + 4 nodes
- CPU: 4 cores
- RAM: 8 GB
- Storage: 200 GB

### Required Software

-- Docker Desktop or Docker Engine
-- Docker Compose
-- Node.js and npm
-- Python 3.12+
-- MariaDB 11
-- Helm 3.12+ and kubectl 1.29+

#### Development

- Docker
- Docker Compose
- Node.js and npm
- Python 3.12
- MariaDB 11

#### Production

##### Minimal:

- domain name with Cloudflare's nameservers (tunnel, pages)
- Kubernetes cluster
- kubectl
- Helm
- OpenTofu

##### Our setup specifics:

- Proxmox VE
- TalosOS cluster
- talosctl
- GitHub self-hosted runner with access to the cluster
- TailScale for remote access to the cluster

-### Environment Variables (common)
-# TODO: UPDATE
-- Backend: SECRET, FRONTEND_URL, BACKEND_URL, DATABASE_URL, RABBITMQ_URL, REDIS_URL
-- OAuth vars (Backend): MOJEID_CLIENT_ID/SECRET, BANKID_CLIENT_ID/SECRET (optional)
-- Frontend: VITE_BACKEND_URL

### Environment Variables

#### Backend
- `MOJEID_CLIENT_ID`, `MOJEID_CLIENT_SECRET` \- OAuth client ID and secret for
MojeID - https://www.mojeid.cz/en/provider/
- `BANKID_CLIENT_ID`, `BANKID_CLIENT_SECRET` \- OAuth client ID and secret for BankID - https://developer.bankid.cz/
- `CSAS_CLIENT_ID`, `CSAS_CLIENT_SECRET` \- OAuth client ID and secret for Česká
spořitelna - https://developers.erstegroup.com/docs/apis/bank.csas
- `DATABASE_URL`(or `MARIADB_HOST`, `MARIADB_PORT`, `MARIADB_DB`, `MARIADB_USER`, `MARIADB_PASSWORD`) \- MariaDB
connection details
- `RABBITMQ_USERNAME`, `RABBITMQ_PASSWORD` \- credentials for RabbitMQ
- `SENTRY_DSN` \- Sentry DSN for error reporting
- `DB_ENCRYPTION_KEY` \- symmetric key for encrypting sensitive data in the database
- `SMTP_HOST`, `SMTP_PORT`, `SMTP_USERNAME`, `SMTP_PASSWORD`, `SMTP_USE_TLS`, `SMTP_USE_SSL`, `SMTP_FROM` \- SMTP
configuration (host, port, auth credentials, TLS/SSL options, sender).
- `UNIRATE_API_KEY` \- API key for UniRate.
#### Frontend
- `VITE_BACKEND_URL` \- URL of the backend API
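A small sketch of how the individual MariaDB variables can be combined when `DATABASE_URL` is not set directly; the URL format mirrors the one used in `test-with-ephemeral-mariadb.sh`, and the defaults are illustrative placeholders only:

```python
import os


def database_url() -> str:
    # Prefer an explicit DATABASE_URL; otherwise assemble it from MARIADB_* parts.
    explicit = os.getenv("DATABASE_URL")
    if explicit:
        return explicit
    user = os.getenv("MARIADB_USER", "appuser")
    password = os.getenv("MARIADB_PASSWORD", "apppass")
    host = os.getenv("MARIADB_HOST", "127.0.0.1")
    port = os.getenv("MARIADB_PORT", "3306")
    db = os.getenv("MARIADB_DB", "group_project")
    return f"mysql+asyncmy://{user}:{password}@{host}:{port}/{db}"
```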
### Dependencies (key libraries)

-Backend: FastAPI, fastapi-users, SQLAlchemy, pydantic v2, Alembic, Celery, uvicorn
+Backend: FastAPI, fastapi-users, SQLAlchemy, pydantic v2, Alembic, Celery, uvicorn, pytest
Frontend: React, TypeScript, Vite

## Local development

-You can run the project with Docker Compose and Python virtual environment for testing and dev purposes
+You can run the project with Docker Compose and a Python virtual environment for testing and development purposes.
### 1) Clone the Repository

```bash
git clone https://github.com/dat515-2025/Group-8.git
-cd 7project
+cd Group-8/7project
```

### 2) Install dependencies

Backend

```bash
+cd backend
python3 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
```

-Frontend
-```bash
-# In 7project/frontend
-npm install
-```
-### 3) Manual Local Run
-Backend
-```bash
-# From the 7project/ directory
-docker compose up --build
-# This starts: MariaDB, RabbitMQ
-# Set environment variables (or create .env file)
-# TODO: fix
-export SECRET=CHANGE_ME_SECRET
-export FRONTEND_DOMAIN_SCHEME=http://localhost:5173
-export BANKID_CLIENT_ID=CHANGE_ME
-export BANKID_CLIENT_SECRET=CHANGE_ME
-export CSAS_CLIENT_ID=CHANGE_ME
-export CSAS_CLIENT_SECRET=CHANGE_ME
-export MOJEID_CLIENT_ID=CHANGE_ME
-export MOJEID_CLIENT_SECRET=CHANGE_ME
-# Apply DB migrations (Alembic)
-bash upgrade_database.sh
-# Run API
-uvicorn app.app:fastApi --reload --host 0.0.0.0 --port 8000
-celery -A app.celery_app.celery_app worker -l info
-```
-Frontend
-```bash
-# Configure backend URL for dev
-echo 'VITE_BACKEND_URL=http://127.0.0.1:8000' > .env
-npm run dev
-# Open http://localhost:5173
-```

### 3) Run Docker containers

```bash
cd ..
docker compose up -d
```

### 4) Prepare the database

```bash
# From 7project
bash upgrade_database.sh
```

### 5) Run backend

```bash
cd backend
#TODO: set env variables
uvicorn app.app:fastApi --reload --host 0.0.0.0 --port 8000
```

### 6) Run Celery worker (optional, in another terminal)

```bash
cd Group-8/7project/backend
source .venv/bin/activate
celery -A app.celery_app.celery_app worker -l info
```

### 7) Install frontend dependencies and run

```bash
cd ../frontend
npm i
npm run dev
```

-- Backend default: http://127.0.0.1:8000 (OpenAPI at /docs)
-- Frontend default: http://localhost:5173
+- Backend available at: http://127.0.0.1:8000 (OpenAPI at /docs)
+- Frontend available at: http://localhost:5173
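Once the backend is up, a small smoke check that the API answers, assuming `httpx` (already used by the backend) is installed; the expected body matches what the test suite asserts for the root endpoint:

```python
import httpx

# Quick local smoke check against the dev server started above.
resp = httpx.get("http://127.0.0.1:8000/")
resp.raise_for_status()
print(resp.json())  # the test suite expects {"status": "ok"}
```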
## Build Instructions

### Backend

```bash
-# run in project7/backend
+cd 7project/backend
+# Don't forget to set the correct image tag with your registry and name,
+# for example lukastrkan/cc-app-demo or gitea.ltrk.dev/lukas/cc-app-demo
-docker buildx build --platform linux/amd64,linux/arm64 -t your_container_registry/your_name --push .
+docker buildx build --platform linux/amd64,linux/arm64 -t CHANGE_ME --push .
```

### Frontend

```bash
-# run in project7/frontend
+cd 7project/frontend
npm ci
npm run build
```
## Deployment Instructions

### Setup Cluster

Deployment should work on any Kubernetes cluster. However, we are using 4 TalosOS virtual machines (1 control plane, 3 workers) running on top of Proxmox VE.

-1) Create 4 VMs with TalosOS
+1) Create at least 4 VMs with TalosOS (4 cores, 8 GB RAM, 200 GB disk)
2) Install talosctl for your OS: https://docs.siderolabs.com/talos/v1.10/getting-started/talosctl
3) Generate Talos config
-```bash
-# TODO: add commands
-```
-4) Edit the generated worker.yaml
-- add google container registry mirror
-- add modules from config generator
-- add extramounts for persistent storage
-- add kernel modules
-5) Apply the config to the VMs
-```bash
-#TODO: add config apply commands
-```
-6) Verify the cluster is up
-7) Export kubeconfig
-```bash
-# TODO: add export command
-```
4) Navigate to the tofu directory
```bash
cd 7project/tofu
```
5) Set IP addresses in environment variables
```bash
CONTROL_PLANE_IP=<control-plane-ip>
WORKER1_IP=<worker1-ip>
WORKER2_IP=<worker2-ip>
WORKER3_IP=<worker3-ip>
WORKER4_IP=<worker4-ip>
....
```
6) Create config files
```bash
# change my-cluster to your desired cluster name
talosctl gen config my-cluster https://$CONTROL_PLANE_IP:6443
```
7) Edit the generated configs

Apply the following changes to `worker.yaml`:
1) Add mounts for persistent storage to `machine.kubelet.extraMounts` section:
```yaml
extraMounts:
- destination: /var/lib/longhorn
type: bind
source: /var/lib/longhorn
options:
- bind
- rshared
- rw
``` ```
2) Change `machine.install.image` to image with extra modules:
```yaml
image: factory.talos.dev/metal-installer/88d1f7a5c4f1d3aba7df787c448c1d3d008ed29cfb34af53fa0df4336a56040b:v1.11.1
```
or you can use the latest image generated at https://factory.talos.dev with the following options:
- Bare-metal machine
- your Talos os version
- amd64 architecture
- siderolabs/iscsi-tools
- siderolabs/util-linux-tools
- (Optionally) siderolabs/qemu-guest-agent
Then copy "Initial Installation" value and paste it to the image field.
3) Add docker registry mirror to `machine.registries.mirrors` section:
```yaml
registries:
mirrors:
docker.io:
endpoints:
- https://mirror.gcr.io
- https://registry-1.docker.io
```
8) Apply configs to the VMs
```bash
talosctl apply-config --insecure --nodes $CONTROL_PLANE_IP --file controlplane.yaml
talosctl apply-config --insecure --nodes $WORKER1_IP --file worker.yaml
talosctl apply-config --insecure --nodes $WORKER2_IP --file worker.yaml
talosctl apply-config --insecure --nodes $WORKER3_IP --file worker.yaml
talosctl apply-config --insecure --nodes $WORKER4_IP --file worker.yaml
```
9) Bootstrap the cluster and retrieve kubeconfig
```bash
export TALOSCONFIG=$(pwd)/talosconfig
talosctl config endpoint https://$CONTROL_PLANE_IP:6443
talosctl config node $CONTROL_PLANE_IP
talosctl bootstrap
talosctl kubeconfig .
```
You can now use k8s client like https://headlamp.dev/ with the generated kubeconfig file.
-### Install
-1) Install base services to cluster
+### Install base services to the cluster
+1) Copy and edit variables

```bash
-cd tofu
-# copy and edit variables
cp terraform.tfvars.example terraform.tfvars
-# authenticate to your cluster/cloud as needed, then:
```
- `metallb_ip_range` - set to range available in your network for load balancer services
- `mariadb_password` - password for internal mariadb user
- `mariadb_root_password` - password for root user
- `mariadb_user_name` - username for admin user
- `mariadb_user_host` - allowed hosts for admin user
- `mariadb_user_password` - password for admin user
- `metallb_maxscale_ip`, `metallb_service_ip`, `metallb_primary_ip`, `metallb_secondary_ip` - IPs for database
cluster,
set them to static IPs from the `metallb_ip_range`
- `s3_enabled`, `s3_bucket`, `s3_region`, `s3_endpoint`, `s3_key_id`, `s3_key_secret` - S3 compatible storage for
backups (optional)
- `phpmyadmin_enabled` - set to false if you want to disable phpmyadmin
- `rabbitmq-password` - password for RabbitMQ
- `cloudflare_account_id` - your Cloudflare account ID
- `cloudflare_api_token` - your Cloudflare API token with permissions to manage tunnels and DNS
- `cloudflare_email` - your Cloudflare account email
- `cloudflare_tunnel_name` - name for the tunnel
- `cloudflare_domain` - your domain name managed in Cloudflare
2) Deploy without Cloudflare module first

```bash
tofu init
tofu apply -exclude modules.cloudflare
-tofu apply
```

3) Deploy rest of the modules

```bash
tofu apply
```
### Configure deployment
1) Create self-hosted runner with access to the cluster or make cluster publicly accessible
2) Change `jobs.deploy.runs-on` in `.github/workflows/deploy-prod.yml` and in `.github/workflows/deploy-pr.yaml` to your
runner label
3) Add variables to GitHub in repository settings:
- `PROD_DOMAIN` - base domain for deployments (e.g. ltrk.cz)
- `DEV_FRONTEND_BASE_DOMAIN` - base domain for your cloudflare pages
4) Add secrets to GitHub in repository settings:
- CLOUDFLARE_ACCOUNT_ID - same as in tofu/terraform.tfvars
- CLOUDFLARE_API_TOKEN - same as in tofu/terraform.tfvars
- DOCKER_USER - your docker registry username
- DOCKER_PASSWORD - your docker registry password
- KUBE_CONFIG - content of your kubeconfig file for the cluster
- PROD_DB_PASSWORD - same as MARIADB_PASSWORD
- PROD_RABBITMQ_PASSWORD - same as rabbitmq-password in tofu/terraform.tfvars
- PROD_DB_ENCRYPTION_KEY - same as DB_ENCRYPTION_KEY
- MOJEID_CLIENT_ID
- MOJEID_CLIENT_SECRET
- BANKID_CLIENT_ID
- BANKID_CLIENT_SECRET
- CSAS_CLIENT_ID
- CSAS_CLIENT_SECRET
- SENTRY_DSN
- SMTP_HOST
- SMTP_PORT
- SMTP_USERNAME
- SMTP_PASSWORD
- SMTP_FROM
- UNIRATE_API_KEY
5) On Github open Actions tab, select "Deploy Prod" and run workflow manually
# TODO: REMOVE I guess
2) Deploy the app using Helm

```bash
# Set the namespace
kubectl create namespace myapp || true
@@ -230,54 +434,43 @@ helm upgrade --install myapp charts/myapp-chart \
  --set env.FRONTEND_URL="https://myapp.example.com" \
  --set env.SECRET="CHANGE_ME_SECRET"
```
Adjust values to your registry and domain. The charts NOTES.txt includes additional examples.
3) Expose and access
- If using Cloudflare Tunnel or an ingress, configure DNS accordingly (see tofu/modules/cloudflare and deployment/tunnel.yaml).
- For quick testing without ingress:
```bash
kubectl -n myapp port-forward deploy/myapp-backend 8000:8000
kubectl -n myapp port-forward deploy/myapp-frontend 5173:80
```
### Verification
```bash
# Check pods
kubectl -n myapp get pods
# Backend health
curl -i http://127.0.0.1:8000/
# OpenAPI
open http://127.0.0.1:8000/docs
# Frontend (if port-forwarded)
open http://localhost:5173
```
## Testing Instructions

-The tests are located in 7project/backend/tests directory
-If you want to test locally, you have to have the DB running locally as well (start the docker compose in /backend).
+The tests are located in the 7project/backend/tests directory. All tests are run by GitHub Actions on every pull request and push to main.
+See the workflow [here](../.github/workflows/run-tests.yml).
+If you want to run the tests locally, the preferred way is to use a [bash script](backend/test-with-ephemeral-mariadb.sh) that will start a [test DB container](backend/docker-compose.test.yml) and remove it afterward.

```bash
-cd backend
+cd 7project/backend
+bash test-with-ephemeral-mariadb.sh
```

### Unit Tests

-There are only 3 basic unit tests, since our services logic is very simple
+There are only 5 basic unit tests, since our services logic is very simple.

```bash
-pytest tests/test_unit_user_service.py
+bash test-with-ephemeral-mariadb.sh --only-unit
```

### Integration Tests

-There are 11 basic unit tests, testing the individual backend API logic
+There are 9 basic integration tests, testing the individual backend API logic.

```bash
-pytest tests/test_integration_app.py
+bash test-with-ephemeral-mariadb.sh --only-integration
```

### End-to-End Tests

-There are 7 e2e tests testing more complex app logic
+There are 7 e2e tests, testing more complex app logic.

```bash
-pytest tests/test_e2e.py
+bash test-with-ephemeral-mariadb.sh --only-e2e
```
## Usage Examples
@@ -360,18 +553,18 @@ curl -H "Authorization: Bearer $TOKEN" http://127.0.0.1:8000/authenticated-route
> This information is used for individual grading.
> Link to the specific commit on GitHub for each contribution.

| Task/Component | Assigned To | Status | Time Spent | Difficulty | Notes |
|----------------|-------------|--------|------------|------------|-------|
| [Project Setup & Repository](https://github.com/dat515-2025/Group-8#) | Lukas | ✅ Complete | [X hours] | Medium | [Any notes] |
| [Design Document](https://github.com/dat515-2025/Group-8/blob/main/6design/design.md) | Both | ✅ Complete | 4 Hours | Easy | [Any notes] |
| [Backend API Development](https://github.com/dat515-2025/Group-8/tree/main/7project/backend/app/api) | Dejan | ✅ Complete | 12 hours | Medium | [Any notes] |
-| [Database Setup & Models](https://github.com/dat515-2025/Group-8/tree/main/7project/backend/app/models) | Lukas | 🔄 In Progress | [X hours] | Medium | [Any notes] |
+| [Database Setup & Models](https://github.com/dat515-2025/Group-8/tree/main/7project/backend/app/models) | Lukas | ✅ Complete | [X hours] | Medium | [Any notes] |
| [Frontend Development](https://github.com/dat515-2025/Group-8/tree/main/7project/frontend) | Dejan | ✅ Complete | 17 hours | Medium | [Any notes] |
-| [Docker Configuration](https://github.com/dat515-2025/Group-8/blob/main/7project/compose.yml) | Lukas | ✅ Complete | [X hours] | Easy | [Any notes] |
+| [Docker Configuration](https://github.com/dat515-2025/Group-8/blob/main/7project/compose.yml) | Lukas | ✅ Complete | 3 hours | Easy | [Any notes] |
-| [Cloud Deployment](https://github.com/dat515-2025/Group-8/blob/main/7project/deployment/app-demo-deployment.yaml) | Lukas | ✅ Complete | [X hours] | Hard | [Any notes] |
+| [Cloud Deployment](https://github.com/dat515-2025/Group-8/blob/main/7project/deployment/app-demo-deployment.yaml) | Lukas | ✅ Complete | [X hours] | Hard | Using Talos cluster running in Proxmox - easy snapshots etc. Frontend deployed at Cloudflare pages. |
| [Testing Implementation](https://github.com/dat515-2025/group-name) | Dejan | ✅ Complete | 16 hours | Medium | [Any notes] |
| [Documentation](https://github.com/dat515-2025/group-name) | Both | 🔄 In Progress | [X hours] | Easy | [Any notes] |
| [Presentation Video](https://github.com/dat515-2025/group-name) | Both | ❌ Not Started | [X hours] | Medium | [Any notes] |

**Legend**: ✅ Complete | 🔄 In Progress | ⏳ Pending | ❌ Not Started
@@ -381,14 +574,27 @@ curl -H "Authorization: Bearer $TOKEN" http://127.0.0.1:8000/authenticated-route
### [Lukáš]

-| Date | Activity | Hours | Description |
-|----------------|---------------------|------------|----------------------------------------------------|
-| 4.10 to 10.10 | Initial Setup | 40 | Repository setup, project structure, cluster setup |
-| 14.10 to 16.10 | Backend Development | 12 | Implemented user authentication - oauth |
-| 8.10 to 12.10 | CI/CD | 10 | Created database schema and models |
-| [Date] | Testing | [X.X] | Unit tests for API endpoints |
-| [Date] | Documentation | [X.X] | Updated README and design doc |
-| **Total** | | **[XX.X]** | |

## Hour Sheet

**Name:** Lukáš Trkan

| Date | Activity | Hours | Description | Representative Commit / PR |
|:-----|:---------|:------|:------------|:---------------------------|
| 18.9. - 19.9. | Initial Setup & Design | 40 | Repository init, system design diagrams, basic Terraform setup | `feat(infrastructure): add basic terraform resources` |
| 20.9. - 5.10. | Core Infrastructure & CI/CD | 12 | K8s setup (ArgoCD), CI/CD workflows, RabbitMQ, Redis, Celery workers, DB migrations | `PR #2`, `feat(infrastructure): add rabbitmq cluster` |
| 6.10. - 9.10. | Frontend Infra & DB | 5 | Deployed frontend to Cloudflare, setup metrics, created database models | `PR #16` (Cloudflare), `PR #19` (DB structure) |
| 10.10. - 11.10. | Backend | 5 | Implemented OAuth support (MojeID, BankID) | `feat(auth): add support for OAuth and MojeID` |
| 12.10. | Infrastructure | 2 | Added database backups | `feat(infrastructure): add backups` |
| 16.10. | Infrastructure | 4 | Implemented secrets management, fixed deployment/env variables | `PR #29` (Deployment envs) |
| 17.10. | Monitoring | 1 | Added Sentry logging | `feat(app): add sentry loging` |
| 21.10. - 22.10. | Backend | 8 | Added ČSAS bank connection | `PR #32` (Fix React OAuth) |
| 29.10. - 30.10. | Backend | 5 | Implemented transaction encryption, add bank scraping | `PR #39` (CSAS Scraping) |
| 30.10. | Monitoring | 6 | Implemented Loki logging and basic Prometheus metrics | `PR #42` (Prometheus metrics) |
| 9.11. | Monitoring | 2 | Added custom Prometheus metrics | `PR #46` (Prometheus custom metrics) |
| 11.11. | Tests | 1 | Investigated and fixed broken Pytest environment | `fix(tests): set pytest env` |
| 11.11. - 12.11. | Features & Deployment | 6 | Added cron support, email sender service, updated workers & image | `PR #49` (Email), `PR #50` (Update workers) |
| 18.9 - 14.11 | Documentation | 8 | Updated report.md, design docs, and tfvars.example | `Create design.md`, `update report` |
| **Total** | | **105** | | |
### Dejan

@@ -405,7 +611,6 @@ curl -H "Authorization: Bearer $TOKEN" http://127.0.0.1:8000/authenticated-route
| 4.11 to 6.11 | Frontend | 6 | Fixes, Improved UI, added support for mobile devices |
| **Total** | | **63** | |

### Group Total: [XXX.X] hours

---
@@ -418,15 +623,34 @@ curl -H "Authorization: Bearer $TOKEN" http://127.0.0.1:8000/authenticated-route
### Challenges Faced

#### Slow cluster performance
This was caused by a single SATA SSD running all VMs. It was solved by adding a second NVMe disk just for the Talos VMs.

[Describe the main challenges and how you overcame them]

### If We Did This Again

#### Different framework
FastAPI lacks usable built-in support for database migrations, and implementing Alembic was a bit tricky.
It was also tricky to integrate the FastAPI auth system with the React frontend, since there is no official project template.
Using .NET (which we considered initially) would probably have solved these issues.

[What would you do differently? What worked well that you'd keep?]

### Individual Growth

-#### [Team Member 1 Name]
+#### [Lukas]
This course finally forced me to learn Kubernetes (it has been on my TODO list for at least 3 years).
I had some prior experience with Terraform/OpenTofu from work, but this improved my understanding of it.
The biggest challenge for me was time tracking, since I am used to tracking time to projects, not to tasks
(I am bad even at that :) ).
It was also an interesting experience to be the one responsible for the initial project structure/design/setup used not only by myself.

[Personal reflection on growth, challenges, and learning]

@@ -438,4 +662,4 @@ curl -H "Authorization: Bearer $TOKEN" http://127.0.0.1:8000/authenticated-route

---

**Report Completion Date**: [Date]
-**Last Updated**: 15.10.2025
+**Last Updated**: 13.11.2025

View File

@@ -105,14 +105,6 @@ module "database" {
  s3_key_secret = var.s3_key_secret
}
#module "argocd" {
# source = "${path.module}/modules/argocd"
# depends_on = [module.storage, module.loadbalancer, module.cloudflare]
# argocd_admin_password = var.argocd_admin_password
# cloudflare_domain = var.cloudflare_domain
#}
#module "redis" { #module "redis" {
# source = "${path.module}/modules/redis" # source = "${path.module}/modules/redis"
# depends_on = [module.storage] # depends_on = [module.storage]

View File

@@ -1,14 +0,0 @@
apiVersion: networking.cfargotunnel.com/v1alpha1
kind: TunnelBinding
metadata:
name: argocd-tunnel-binding
namespace: argocd
subjects:
- name: argocd-server
spec:
target: https://argocd-server.argocd.svc.cluster.local
fqdn: argocd.${base_domain}
noTlsVerify: true
tunnelRef:
kind: ClusterTunnel
name: cluster-tunnel

View File

@@ -1,39 +0,0 @@
terraform {
required_providers {
kubectl = {
source = "gavinbunney/kubectl"
version = "1.19.0"
}
helm = {
source = "hashicorp/helm"
version = "3.0.2"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = "2.38.0"
}
}
}
resource "kubernetes_namespace" "argocd" {
metadata {
name = "argocd"
}
}
resource "helm_release" "argocd" {
name = "argocd"
namespace = "argocd"
repository = "https://argoproj.github.io/argo-helm"
chart = "argo-cd"
depends_on = [kubernetes_namespace.argocd]
}
resource "kubectl_manifest" "argocd-tunnel-bind" {
depends_on = [helm_release.argocd]
yaml_body = templatefile("${path.module}/argocd-ui.yaml", {
base_domain = var.cloudflare_domain
})
}

View File

@@ -1,12 +0,0 @@
variable "argocd_admin_password" {
type = string
nullable = false
sensitive = true
description = "ArgoCD admin password"
}
variable "cloudflare_domain" {
type = string
default = "Base cloudflare domain, e.g. example.com"
nullable = false
}

View File

@@ -1,4 +1,4 @@
 apiVersion: v2
 name: maxscale-helm
-version: 1.0.14
+version: 1.0.15
 description: Helm chart for MaxScale related Kubernetes manifests

View File

@@ -154,6 +154,13 @@ spec:
    memory: 128Mi
  limits:
    memory: 1Gi
monitor:
interval: 2s
cooperativeMonitoring: majority_of_all
params:
auto_failover: "true"
auto_rejoin: "true"
switchover_on_low_disk_space: "true"
livenessProbe:
  initialDelaySeconds: 20

View File

@@ -59,7 +59,7 @@ resource "helm_release" "mariadb-operator" {
resource "helm_release" "maxscale_helm" { resource "helm_release" "maxscale_helm" {
name = "maxscale-helm" name = "maxscale-helm"
chart = "${path.module}/charts/maxscale-helm" chart = "${path.module}/charts/maxscale-helm"
version = "1.0.14" version = "1.0.15"
depends_on = [helm_release.mariadb-operator-crds, kubectl_manifest.secrets] depends_on = [helm_release.mariadb-operator-crds, kubectl_manifest.secrets]
timeout = 3600 timeout = 3600

View File

@@ -1,5 +1,3 @@
# Example terraform.tfvars for MariaDB and MetalLB
metallb_ip_range = "10.80.0.100-10.80.0.240" metallb_ip_range = "10.80.0.100-10.80.0.240"
# Secret configuration (use strong passwords; do not commit real secrets) # Secret configuration (use strong passwords; do not commit real secrets)
@@ -11,13 +9,19 @@ mariadb_user_name = "example_user"
mariadb_user_host = "%" mariadb_user_host = "%"
mariadb_user_password = "example_user_password" mariadb_user_password = "example_user_password"
# MetalLB IPs for services (optional) # MetalLB IPs for services
metallb_maxscale_ip = "10.80.0.219" metallb_maxscale_ip = "10.80.0.219"
metallb_service_ip = "10.80.0.120" metallb_service_ip = "10.80.0.120"
metallb_primary_ip = "10.80.0.130" metallb_primary_ip = "10.80.0.130"
metallb_secondary_ip = "10.80.0.131" metallb_secondary_ip = "10.80.0.131"
# phpMyAdmin toggle s3_enabled = false
s3_bucket = "cluster"
s3_region = "us-east-1"
s3_endpoint = "your.s3.endpoint.example"
s3_key_id = "your_s3_key_id"
s3_key_secret = "your_s3_key_secret"
phpmyadmin_enabled = true

cloudflare_account_id = "CHANGE_ME"
@@ -26,4 +30,5 @@ cloudflare_email = "CHANGE_ME"
cloudflare_tunnel_name = "CHANGE_ME"
cloudflare_domain      = "CHANGE_ME"
rabbitmq-password = "CHANGE_ME"