diff --git a/.github/workflows/ansible-deploy.yml b/.github/workflows/ansible-deploy.yml
new file mode 100644
index 0000000000..3a935b07d8
--- /dev/null
+++ b/.github/workflows/ansible-deploy.yml
@@ -0,0 +1,75 @@
+name: Ansible Deployment
+
+on:
+ push:
+ branches: [master]
+ paths:
+ - "app_python/ansible/**"
+ - "!app_python/ansible/docs/**"
+ - ".github/workflows/ansible-deploy.yml"
+ pull_request:
+ branches: [master]
+ paths:
+ - "app_python/ansible/**"
+
+jobs:
+ lint:
+ name: Ansible Lint
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
+
+ - name: Install dependencies
+ run: |
+ pip install ansible ansible-lint
+
+ - name: Run ansible-lint
+ run: |
+ cd app_python/ansible
+ ansible-lint playbooks/*.yml
+
+ deploy:
+ name: Deploy Application
+ needs: lint
+ runs-on: ubuntu-latest
+ if: github.event_name == 'push'
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
+
+ - name: Install Ansible and Docker modules
+ run: |
+ pip install ansible
+ ansible-galaxy collection install community.docker
+
+ - name: Setup SSH
+ run: |
+ mkdir -p ~/.ssh
+ echo "${{ secrets.SSH_PRIVATE_KEY }}" > ~/.ssh/id_rsa
+ chmod 600 ~/.ssh/id_rsa
+ ssh-keyscan -H ${{ secrets.VM_HOST }} >> ~/.ssh/known_hosts
+
+ - name: Prepare Vault password
+      run: umask 077 && echo "${{ secrets.ANSIBLE_VAULT_PASSWORD }}" > /tmp/vault_pass
+
+ - name: Deploy with Ansible
+ run: |
+ cd app_python/ansible
+ ansible-playbook playbooks/deploy.yml \
+ -i inventory/hosts.ini \
+ --vault-password-file /tmp/vault_pass
+
+ - name: Verify Application
+ run: |
+ sleep 10
+ curl -f http://${{ secrets.VM_HOST }}:8000 || exit 1
+ curl -f http://${{ secrets.VM_HOST }}:8000/health || exit 1
diff --git a/.github/workflows/python-ci.yml b/.github/workflows/python-ci.yml
new file mode 100644
index 0000000000..7ccdb5a79f
--- /dev/null
+++ b/.github/workflows/python-ci.yml
@@ -0,0 +1,100 @@
+name: Python CI
+
+on:
+ push:
+ branches: [master, lab03]
+ paths:
+ - "app_python/**"
+ - ".github/workflows/python-ci.yml"
+ pull_request:
+ branches: [master]
+ paths:
+ - "app_python/**"
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ working-directory: app_python
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.13"
+ cache: "pip"
+ cache-dependency-path: app_python/requirements.txt
+
+ - name: Install dependencies
+ run: pip install -r requirements.txt
+
+ - name: Lint with flake8
+ run: |
+ pip install flake8
+ flake8 app.py --count --select=E9,F63,F7,F82 --show-source --statistics
+ flake8 app.py --count --exit-zero --max-complexity=10 --statistics
+
+ - name: Test with pytest
+ run: pytest tests/ -v
+
+ security:
+ runs-on: ubuntu-latest
+ needs: test
+ if: always()
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.13"
+ cache: "pip"
+ cache-dependency-path: app_python/requirements.txt
+
+ - name: Install dependencies
+ run: pip install -r app_python/requirements.txt
+
+ - name: Install Snyk CLI
+ run: npm install -g snyk
+
+ - name: Run Snyk test
+ run: snyk test --file=app_python/requirements.txt --severity-threshold=high
+ env:
+ SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
+
+ docker:
+ needs: test
+ runs-on: ubuntu-latest
+ if: github.event_name == 'push' && github.ref == 'refs/heads/master'
+ defaults:
+ run:
+ working-directory: app_python
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ - name: Login to Docker Hub
+ uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKER_USERNAME }}
+ password: ${{ secrets.DOCKER_TOKEN }}
+
+ - name: Generate version (CalVer)
+ id: version
+      run: echo "version=$(date +%Y.%m.%d)" >> "$GITHUB_OUTPUT"
+
+ - name: Build and push
+ uses: docker/build-push-action@v5
+ with:
+ context: app_python
+ push: true
+ tags: |
+ ${{ secrets.DOCKER_USERNAME }}/devops-info-service:latest
+ ${{ secrets.DOCKER_USERNAME }}/devops-info-service:${{ steps.version.outputs.version }}
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
+
diff --git a/.gitignore b/.gitignore
index 30d74d2584..f9362757fa 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,6 @@
-test
\ No newline at end of file
+app_python/venv/
+__pycache__/
+*.py[cod]
+.DS_Store
+*.log
+.env
diff --git a/README.md b/README.md
deleted file mode 100644
index 371d51f456..0000000000
--- a/README.md
+++ /dev/null
@@ -1,271 +0,0 @@
-# DevOps Engineering: Core Practices
-
-[](#labs)
-[](#exam-alternative)
-[](#course-roadmap)
-
-Master **production-grade DevOps practices** through hands-on labs. Build, containerize, deploy, monitor, and scale applications using industry-standard tools.
-
----
-
-## Quick Start
-
-1. **Fork** this repository
-2. **Clone** your fork locally
-3. **Start with Lab 1** and progress sequentially
-4. **Submit PRs** for each lab (details below)
-
----
-
-## Course Roadmap
-
-| Week | Lab | Topic | Key Technologies |
-|------|-----|-------|------------------|
-| 1 | 1 | Web Application Development | Python/Go, Best Practices |
-| 2 | 2 | Containerization | Docker, Multi-stage Builds |
-| 3 | 3 | Continuous Integration | GitHub Actions, Snyk |
-| 4 | 4 | Infrastructure as Code | Terraform, Cloud Providers |
-| 5 | 5 | Configuration Management | Ansible Basics |
-| 6 | 6 | Continuous Deployment | Ansible Advanced |
-| 7 | 7 | Logging | Promtail, Loki, Grafana |
-| 8 | 8 | Monitoring | Prometheus, Grafana |
-| 9 | 9 | Kubernetes Basics | Minikube, Deployments, Services |
-| 10 | 10 | Helm Charts | Templating, Hooks |
-| 11 | 11 | Secrets Management | K8s Secrets, HashiCorp Vault |
-| 12 | 12 | Configuration & Storage | ConfigMaps, PVCs |
-| 13 | 13 | GitOps | ArgoCD |
-| 14 | 14 | Progressive Delivery | Argo Rollouts |
-| 15 | 15 | StatefulSets | Persistent Storage, Headless Services |
-| 16 | 16 | Cluster Monitoring | Kube-Prometheus, Init Containers |
-| — | **Exam Alternative Labs** | | |
-| 17 | 17 | Edge Deployment | Fly.io, Global Distribution |
-| 18 | 18 | Decentralized Storage | 4EVERLAND, IPFS, Web3 |
-
----
-
-## Grading
-
-### Grade Composition
-
-| Component | Weight | Points |
-|-----------|--------|--------|
-| **Labs (16 required)** | 80% | 160 pts |
-| **Final Exam** | 20% | 40 pts |
-| **Bonus Tasks** | Extra | +40 pts max |
-| **Total** | 100% | 200 pts |
-
-### Exam Alternative
-
-Don't want to take the exam? Complete **both** bonus labs:
-
-| Lab | Topic | Points |
-|-----|-------|--------|
-| **Lab 17** | Fly.io Edge Deployment | 20 pts |
-| **Lab 18** | 4EVERLAND & IPFS | 20 pts |
-
-**Requirements:**
-- Complete both labs (17 + 18 = 40 pts, replaces exam)
-- Minimum 16/20 on each lab
-- Deadline: **1 week before exam date**
-- Can still take exam if you need more points for desired grade
-
-
-📊 Grade Scale
-
-| Grade | Points | Percentage |
-|-------|--------|------------|
-| **A** | 180-200+ | 90-100% |
-| **B** | 150-179 | 75-89% |
-| **C** | 120-149 | 60-74% |
-| **D** | 0-119 | 0-59% |
-
-**Minimum to Pass:** 120 points (60%)
-
-
-
-
-📈 Grade Examples
-
-**Scenario 1: Labs + Exam**
-```
-Labs: 16 × 9 = 144 pts
-Bonus: 5 labs × 2.5 = 12.5 pts
-Exam: 35/40 pts
-Total: 191.5 pts = 96% (A)
-```
-
-**Scenario 2: Labs + Exam Alternative**
-```
-Labs: 16 × 9 = 144 pts
-Bonus: 8 labs × 2.5 = 20 pts
-Lab 17: 18 pts
-Lab 18: 17 pts
-Total: 199 pts = 99.5% (A)
-```
-
-
-
----
-
-## Lab Structure
-
-Each lab is worth **10 points** (main tasks) + **2.5 points** (bonus).
-
-- **Minimum passing score:** 6/10 per lab
-- **Late submissions:** Max 6/10 (within 1 week)
-- **Very late (>1 week):** Not accepted
-
-
-📋 Lab Categories
-
-**Foundation (Labs 1-2)**
-- Web app development
-- Docker containerization
-
-**CI/CD & Infrastructure (Labs 3-4)**
-- GitHub Actions
-- Terraform
-
-**Configuration Management (Labs 5-6)**
-- Ansible playbooks and roles
-
-**Observability (Labs 7-8)**
-- Loki logging stack
-- Prometheus monitoring
-
-**Kubernetes Core (Labs 9-12)**
-- K8s basics, Helm
-- Secrets, ConfigMaps
-
-**Advanced Kubernetes (Labs 13-16)**
-- ArgoCD, Argo Rollouts
-- StatefulSets, Monitoring
-
-**Exam Alternative (Labs 17-18)**
-- Fly.io, 4EVERLAND/IPFS
-
-
-
----
-
-## How to Submit
-
-```bash
-# 1. Create branch
-git checkout -b lab1
-
-# 2. Complete lab tasks
-
-# 3. Commit and push
-git add .
-git commit -m "Complete lab1"
-git push -u origin lab1
-
-# 4. Create TWO Pull Requests:
-# PR #1: your-fork:lab1 → course-repo:master
-# PR #2: your-fork:lab1 → your-fork:master
-```
-
-
-📝 Submission Checklist
-
-- [ ] All main tasks completed
-- [ ] Documentation files created
-- [ ] Screenshots where required
-- [ ] Code tested and working
-- [ ] Markdown validated ([linter](https://dlaa.me/markdownlint/))
-- [ ] Both PRs created
-
-
-
----
-
-## Resources
-
-
-🛠️ Required Tools
-
-| Tool | Purpose |
-|------|---------|
-| Git | Version control |
-| Docker | Containerization |
-| kubectl | Kubernetes CLI |
-| Helm | K8s package manager |
-| Minikube | Local K8s cluster |
-| Terraform | Infrastructure as Code |
-| Ansible | Configuration management |
-
-
-
-
-📚 Documentation Links
-
-**Core:**
-- [Docker](https://docs.docker.com/)
-- [Kubernetes](https://kubernetes.io/docs/)
-- [Helm](https://helm.sh/docs/)
-
-**CI/CD:**
-- [GitHub Actions](https://docs.github.com/en/actions)
-- [Terraform](https://www.terraform.io/docs)
-- [Ansible](https://docs.ansible.com/)
-
-**Observability:**
-- [Prometheus](https://prometheus.io/docs/)
-- [Grafana](https://grafana.com/docs/)
-
-**Advanced:**
-- [ArgoCD](https://argo-cd.readthedocs.io/)
-- [Argo Rollouts](https://argoproj.github.io/argo-rollouts/)
-- [HashiCorp Vault](https://developer.hashicorp.com/vault/docs)
-
-
-
-
-💡 Tips for Success
-
-1. **Start early** - Don't wait until deadline
-2. **Read instructions fully** before starting
-3. **Test everything** before submitting
-4. **Document as you go** - Don't leave it for the end
-5. **Ask questions early** - Don't wait until last minute
-6. **Use proper Git workflow** - Branches, commits, PRs
-
-
-
-
-🔧 Common Issues
-
-**Docker:**
-- Daemon not running → Start Docker Desktop
-- Permission denied → Add user to docker group
-
-**Minikube:**
-- Won't start → Try `--driver=docker`
-- Resource issues → Allocate more memory/CPU
-
-**Kubernetes:**
-- ImagePullBackOff → Check image name/registry
-- CrashLoopBackOff → Check logs: `kubectl logs `
-
-
-
----
-
-## Course Completion
-
-After completing all 16 core labs (+ optional Labs 17-18), you'll have:
-
-✅ Full-stack DevOps expertise
-✅ Production-ready portfolio with 16-18 projects
-✅ Container and Kubernetes mastery
-✅ CI/CD pipeline experience
-✅ Infrastructure as Code skills
-✅ Monitoring and observability knowledge
-✅ GitOps workflow experience
-
----
-
-**Ready to begin? Start with [Lab 1](labs/lab01.md)!**
-
-Questions? Check the course Moodle page or ask during office hours.
diff --git a/app_python/.dockerignore b/app_python/.dockerignore
new file mode 100644
index 0000000000..8f31d7babe
--- /dev/null
+++ b/app_python/.dockerignore
@@ -0,0 +1,14 @@
+venv
+__pycache__
+*.py[cod]
+*.pyc
+*.pyo
+*.pyd
+*.log
+.env
+.env.local
+.git
+.gitignore
+docs
+tests
+.DS_Store
diff --git a/app_python/.gitignore b/app_python/.gitignore
new file mode 100644
index 0000000000..e056cde1ba
--- /dev/null
+++ b/app_python/.gitignore
@@ -0,0 +1,31 @@
+# Python
+__pycache__/
+*.py[cod]
+*.pyc
+*.pyd
+*.pyo
+*.so
+venv/
+.venv/
+env/
+.idea/
+.vscode/
+
+# Logs
+*.log
+logs/
+
+# OS
+.DS_Store
+Thumbs.db
+
+# Virtual environment
+venv/
+ENV/
+
+# Jupyter
+.ipynb_checkpoints
+
+# Environment variables
+.env
+.env.local
diff --git a/app_python/Dockerfile b/app_python/Dockerfile
new file mode 100644
index 0000000000..71f283b51a
--- /dev/null
+++ b/app_python/Dockerfile
@@ -0,0 +1,32 @@
+FROM python:3.13-slim
+
+ENV DEBIAN_FRONTEND=noninteractive
+ENV PYTHONDONTWRITEBYTECODE=1
+ENV PYTHONUNBUFFERED=1
+
+RUN apt-get update \
+ && apt-get install -y --no-install-recommends gcc ca-certificates curl \
+ && rm -rf /var/lib/apt/lists/*
+
+RUN groupadd -r appgroup && useradd -r -g appgroup -d /home/appuser -m -s /sbin/nologin appuser
+
+WORKDIR /app
+
+COPY requirements.txt .
+
+RUN python -m pip install --upgrade pip \
+ && pip install --no-cache-dir -r requirements.txt
+
+COPY app.py .
+
+RUN chown -R appuser:appgroup /app
+
+USER appuser
+
+EXPOSE 5000
+
+ENV HOST=0.0.0.0
+ENV PORT=5000
+
+# Default command
+CMD ["python", "app.py"]
diff --git a/app_python/README.md b/app_python/README.md
new file mode 100644
index 0000000000..4810e76330
--- /dev/null
+++ b/app_python/README.md
@@ -0,0 +1,216 @@
+# DevOps Info Service
+[](https://github.com/versceana/DevOps-Core-Course/actions/workflows/ansible-deploy.yml)
+
+A small web service that returns service, system and runtime information.
+Implemented for the DevOps Core Course (Lab 1) using **Python + Flask**.
+
+---
+
+## Overview
+
+This service exposes three endpoints:
+
+- `GET /` — Returns detailed information about the service, system, runtime and request.
+- `GET /health` — Lightweight health check for monitoring/probes.
+- `GET /visits` — Returns visit count.
+---
+
+## Prerequisites
+
+- **macOS** with Python (3.11+ recommended; tested on 3.13)
+- `git`
+- `curl` or `http` (HTTPie)
+- (optional) `jq` for pretty-printing JSON
+
+---
+
+## Installation (macOS)
+
+From the repository root:
+
+```bash
+cd DevOps-Core-Course/app_python
+
+# create a virtual environment inside the project (recommended)
+python -m venv venv
+source venv/bin/activate # macOS / Linux
+
+# install pinned dependencies
+pip install -r requirements.txt
+```
+
+> We keep the virtual environment inside `app_python/venv/`. This directory is listed in `.gitignore` and will not be committed.
+
+---
+
+## Running the application
+
+### Basic
+
+```bash
+# default: binds to 0.0.0.0:5000
+python app.py
+```
+
+### Custom host / port / debug
+
+```bash
+# custom port
+PORT=8000 python app.py
+
+# custom host and port
+HOST=127.0.0.1 PORT=3000 python app.py
+
+# enable debug mode
+DEBUG=true python app.py
+```
+
+If port `5000` is already used (common on macOS), start on a different port (e.g., `8000`).
+
+---
+
+## API
+
+### `GET /`
+
+Returns a JSON object with keys: `service`, `system`, `runtime`, `request`, `endpoints`.
+
+**Example request**
+
+```bash
+curl -s http://127.0.0.1:8000/ | jq .
+```
+
+**Example (sample output — will vary per machine)**
+
+```json
+{
+ "service": {
+ "name": "devops-info-service",
+ "version": "1.0.0",
+ "description": "DevOps course info service",
+ "framework": "Flask"
+ },
+ "system": {
+ "hostname": "MacBook-Pro-Diana-2.local",
+ "platform": "Darwin",
+ "platform_version": "25.2.0",
+ "architecture": "arm64",
+ "cpu_count": 8,
+ "python_version": "3.13.0"
+ },
+ "runtime": {
+ "uptime_seconds": 355,
+ "uptime_human": "0 hours, 5 minutes",
+ "current_time": "2026-01-28T19:52:46.995203+00:00",
+ "timezone": "UTC"
+ },
+ "request": {
+ "client_ip": "127.0.0.1",
+ "user_agent": "curl/7.87.0",
+ "method": "GET",
+ "path": "/"
+ },
+ "endpoints": [
+ { "path": "/", "method": "GET", "description": "Service information" },
+ { "path": "/health", "method": "GET", "description": "Health check" }
+ ]
+}
+```
+
+### `GET /health`
+
+Simple health JSON used for monitoring / readiness/liveness probes.
+
+**Example**
+
+```bash
+curl -s http://127.0.0.1:8000/health | jq .
+```
+
+**Sample**
+
+```json
+{
+ "status": "healthy",
+ "timestamp": "2026-01-28T19:52:29.081981+00:00",
+ "uptime_seconds": 337
+}
+```
+
+**Status codes**
+
+- `200 OK` — healthy
+- `500` — internal server error (if thrown)
+
+---
+
+## Configuration
+
+| Environment variable | Default | Description |
+| -------------------- | --------- | ------------------------------------ |
+| `HOST` | `0.0.0.0` | Host address to bind to |
+| `PORT` | `5000` | Port to listen on |
+| `DEBUG` | `false` | Enable Flask debug mode (true/false) |
+
+Use `PORT=8000` if `5000` is already in use on your machine.
+
+---
+
+## Testing
+
+Quick checks:
+
+```bash
+# main endpoint
+curl -s http://127.0.0.1:8000/ | jq .
+
+# health
+curl -s http://127.0.0.1:8000/health | jq .
+```
+
+If `jq` is not installed, use:
+
+```bash
+curl -s http://127.0.0.1:8000/ | python -m json.tool
+```
+
+---
+
+## Project layout
+
+```
+app_python/
+├── app.py # Main application
+├── requirements.txt # Pinned dependencies
+├── venv/ # (local) virtual environment - ignored by git
+├── .gitignore # Git ignore rules
+├── README.md # This file
+├── tests/ # Unit tests (placeholder)
+│ └── __init__.py
+└── docs/
+ ├── LAB01.md # Lab submission notes
+ └── screenshots/ # PNG screenshots for submission
+```
+
+---
+
+## Notes & caveats
+
+- This project uses Flask's development server for lab/demo purposes. For production or grading polish, a WSGI server (Gunicorn/uvicorn) + proper logging should be used.
+- The code attempts to use UTC timestamps. There may be a `DeprecationWarning` about `utcnow()` on some Python versions; this does not affect lab functionality. A later commit will make datetime objects timezone-aware to remove the warning.
+
+---
+
+## Contributing / Submission
+
+1. Implement changes on branch `lab01`.
+2. Commit and push to your fork.
+3. Create PR: `your-fork:lab01` → `inno-devops-labs/DevOps-Core-Course:master`.
+4. Include screenshots from `app_python/docs/screenshots/` in the PR description.
+
+---
+
+## License
+
+Educational project for DevOps Core Course.
diff --git a/app_python/ansible/.ansible-lint b/app_python/ansible/.ansible-lint
new file mode 100644
index 0000000000..824881ce99
--- /dev/null
+++ b/app_python/ansible/.ansible-lint
@@ -0,0 +1,2 @@
+skip_list:
+ - internal-error
diff --git a/app_python/ansible/.vault_pass b/app_python/ansible/.vault_pass
new file mode 100644
index 0000000000..016967a565
--- /dev/null
+++ b/app_python/ansible/.vault_pass
@@ -0,0 +1 @@
+CHANGE_ME-vault-password-was-committed-rotate-it-and-remove-this-file-from-git
diff --git a/app_python/ansible/ansible.cfg b/app_python/ansible/ansible.cfg
new file mode 100644
index 0000000000..0b28968a12
--- /dev/null
+++ b/app_python/ansible/ansible.cfg
@@ -0,0 +1,13 @@
+[defaults]
+inventory = inventory/hosts.ini
+roles_path = roles
+host_key_checking = False
+retry_files_enabled = False
+private_key_file = ~/.ssh/id_rsa
+# Optional: use a vault password file if you create one
+# vault_password_file = .vault_pass
+
+[privilege_escalation]
+become = True
+become_method = sudo
+become_user = root
diff --git a/app_python/ansible/group_vars/all.yml b/app_python/ansible/group_vars/all.yml
new file mode 100644
index 0000000000..97b49cd335
--- /dev/null
+++ b/app_python/ansible/group_vars/all.yml
@@ -0,0 +1,20 @@
+$ANSIBLE_VAULT;1.1;AES256
+34613063633735306531626433333239653830383062323934303364363161376263646531653334
+3766313935313564323331663032663061656632386633350a396139616134663633613231313264
+35353465313263366361316166326362653965666264383464303066623338666166643639313930
+6461333739323534650a333932656432623661633131643631356466656538316537356430393833
+31386364303336393339326232353665393465633838333738626634363363313433303138353634
+64636537656566633163326431626339333263656435646136626464363932353564623230623732
+62623365643134323333336137643239386264623334633937313834666161376636363537626264
+37613938316337333166333566313861316535306633346261363730313561636438323634393738
+34316432386263343561303961616432313930663235306536653566653361303039303936323938
+30303639663638383739633132376432373439613066376663326665306137306630383330396331
+32656332383739636465363363366161653363316532313031306237396462393830633334666464
+31633537613066616139326462313231336131656662346439346335386531343139663936316161
+61633062613232633364633966326536363361633639643165366633323063656337303430383837
+30303261343163633936613137316561616238333934346662636265623032346131383738636538
+33633438646166626131366239353264363934653534373834386361373632303864666430636561
+37636632663238366637363636626631303739613264303238633430323931303262613130366666
+64383839333930666536303565396532313137366438353966393531643332366665623137663064
+36356563316366343162613831323061636161636130376139373661653961636466666439663633
+653836303332383466663763333065366336
diff --git a/app_python/ansible/inventory/aws_ec2.yml b/app_python/ansible/inventory/aws_ec2.yml
new file mode 100644
index 0000000000..7549e8104b
--- /dev/null
+++ b/app_python/ansible/inventory/aws_ec2.yml
@@ -0,0 +1,15 @@
+plugin: amazon.aws.aws_ec2
+regions:
+ - us-east-1
+filters:
+ instance-state-name: running
+ tag:Name: lab4-vm
+keyed_groups:
+ - key: tags.Name
+ prefix: tag
+groups:
+ webservers: "true"
+compose:
+ ansible_host: public_ip_address
+ ansible_user: "'ubuntu'"
+ ansible_ssh_private_key_file: "'/Users/dianayakupova/Study/DevOps/lab01/DevOps-Core-Course/ansible/keys/labsuser.pem'"
diff --git a/app_python/ansible/inventory/hosts.ini b/app_python/ansible/inventory/hosts.ini
new file mode 100644
index 0000000000..67a6fd42e5
--- /dev/null
+++ b/app_python/ansible/inventory/hosts.ini
@@ -0,0 +1,5 @@
+[webservers]
+vm1 ansible_host=50.16.50.40 ansible_user=ubuntu ansible_ssh_private_key_file=keys/labsuser.pem
+
+[webservers:vars]
+ansible_python_interpreter=/usr/bin/python3
diff --git a/app_python/ansible/inventory/hosts.ini.bak b/app_python/ansible/inventory/hosts.ini.bak
new file mode 100644
index 0000000000..a3b9d40411
--- /dev/null
+++ b/app_python/ansible/inventory/hosts.ini.bak
@@ -0,0 +1,5 @@
+[webservers]
+vm1 ansible_host=34.207.215.1 ansible_user=ubuntu ansible_ssh_private_key_file=keys/labsuser.pem
+
+[webservers:vars]
+ansible_python_interpreter=/usr/bin/python3
diff --git a/app_python/ansible/keys/labsuser.pem b/app_python/ansible/keys/labsuser.pem
new file mode 100644
index 0000000000..7381a84c21
--- /dev/null
+++ b/app_python/ansible/keys/labsuser.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEA6tKTMVcjZ8QSwuGLHI43Iu8daTOKuHopZbnx/iKzR8cJ5JRw
+BPd8YDVhqhRotxX9oujrvi/pJ7YsItP4OJPSna1URU8EJpnhgLqxkwMa8pNGDm1z
+QGssFPmGDtzLKN4PGYEISeX6TQ9sgjaXZ2WzTsz7nnuZDWbEVvQ5iAGztY94H7Xp
+OMO0aiFkuggv1nHyNfAttFEparN2A7gBP/JVYNVrOazbFM0mwGGJsA1MSLYS3cij
+Cy4aokXBdc3f4UPpH8wNSJe50sq8/TP2h+/7lZ/xt9fkiwu609fndzp6p+8G02ky
+Xhc6+qIi9aaObAqsLaFxXcwpEt1IKRqJj7JTbwIDAQABAoIBAHmWWxy7L9v0zHoX
+6r34bVgrU6E7TOLEblb79u9N7GKIDvlKWs5XjxEJ0u4L70SNoG0QCbDKggFL9IJ8
+GxTAhdea/f19MnsCRp8vC+9BNlzQIQArb/e1Bng900oyzQb2I/9irnYFHJt2Fws9
+VRv2rudogAjJmrAzvAoCHx/rMbAaAWqWK6vL5Jbal0bhfkwifDWm6w3GkSscT6bA
+i9jBYV+dAoP74apLeM3abAhap+BMoWUreZpi3ph9M7wuVGWvuAeDqLDcm4TCnfux
+hHhqyxM5u4g7mgXWi2cweUNPWijzCYFF2Q62MbIUpwDNg0LLMWdOqgYzzz4cvToW
+aeZeUHECgYEA+/Umjldu7kpAzYBQBWexWMY/RTaRqV+GXFq1dlr8OP29qqqjGHSr
+rYcTvRiDbIKURJQAXqT423iU5Ugj7ib6Mw7oqNIXcnxbza+Tt1L9PEWPNI+IWNLF
+LgFtkEiewqRjsjjqTbNnAxz80SmlEYns3fMw8h/jWPhQN4cLd1ANF6MCgYEA7pcL
+74Nw7OkRgWHB515n8GNXKKP/0Ey86hRQqg59XBjAMeSlI+mOV0+UbXDBxhoUF9Bo
+LiU0cI0T6W4uHJukomG7kseLbQhSN2wMICvLmPVo8DNoMqTSnmbO0u+3ZPdzHFKP
+ePqKUFYunPhQ0hVh5ZDZOooD3r1hz/WLSrPSgcUCgYBNmfbzFljTScllweRjlclY
+l067aZA5g3NllIaZWnKetwAoxX+QM7cArj7VGey4igtSh+cJP3RqXFw3th1i51xe
+R3Q/Mt4H4+d+lVpAt23Jl38A6EhNWzVh8wykyMklF+6g4MjBf8T1f4MahOITYdCz
+l4h2un2SVICYaZ6Nc/lkUwKBgDYYQxmD+LeORGzMSFV3/yLusxVA0cBdlbuQ7ZUR
+AfU+m1z0lUimLKK5JVMG7LhIs2OO2gAUaWZXZjtXwtazp0k11Xx7B66p41IzijBF
+Hb1Jbwh+71IEMsi9UIlc1nzq6ctzIJpyW6fJUno+hryLIo4VIFt4bVY0JBgteO5w
+BPttAoGBAI7x/lfn2KOtPnV5fM/mx859e8FFJXj6YLNFHc2Ps12zx+bHsu3PBQP5
+wnxFa/Lz/oviCBOFAvXfNVvQ967VAQYxpoivrumcyUfUWzUta8JnyEiGsHwn+ixb
+Rwx9rStcJ8xoV4JpxKNoNXNvbZNAtZlaeIG3SFu5GP+NxH8Pr0+R
+-----END RSA PRIVATE KEY-----
\ No newline at end of file
diff --git a/app_python/ansible/playbooks/deploy.yml b/app_python/ansible/playbooks/deploy.yml
new file mode 100644
index 0000000000..df2e9c5067
--- /dev/null
+++ b/app_python/ansible/playbooks/deploy.yml
@@ -0,0 +1,6 @@
+---
+- name: Deploy application
+ hosts: webservers
+ become: true
+ roles:
+ - web_app
diff --git a/app_python/ansible/playbooks/meta/main.yml b/app_python/ansible/playbooks/meta/main.yml
new file mode 100644
index 0000000000..cb7d8e0460
--- /dev/null
+++ b/app_python/ansible/playbooks/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - role: docker
diff --git a/app_python/ansible/playbooks/provision.yml b/app_python/ansible/playbooks/provision.yml
new file mode 100644
index 0000000000..7cc2e6678d
--- /dev/null
+++ b/app_python/ansible/playbooks/provision.yml
@@ -0,0 +1,8 @@
+---
+- name: Provision web servers
+ hosts: webservers
+ become: true
+
+ roles:
+ - common
+ - docker
diff --git a/app_python/ansible/roles/common/defaults/main.yml b/app_python/ansible/roles/common/defaults/main.yml
new file mode 100644
index 0000000000..2eb13b3def
--- /dev/null
+++ b/app_python/ansible/roles/common/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+common_packages:
+ - python3-pip
+ - curl
+ - git
+ - vim
+ - htop
+ - unzip
+ - apt-transport-https
diff --git a/app_python/ansible/roles/common/tasks/main.yml b/app_python/ansible/roles/common/tasks/main.yml
new file mode 100644
index 0000000000..2e2a1d44c4
--- /dev/null
+++ b/app_python/ansible/roles/common/tasks/main.yml
@@ -0,0 +1,46 @@
+---
+- name: Common role tasks
+ become: true
+ tags: common
+ block:
+ - name: Update apt cache
+ ansible.builtin.apt:
+ update_cache: true
+ cache_valid_time: 3600
+ register: common_apt_result
+ until: common_apt_result is success
+ retries: 3
+ delay: 5
+ tags: packages
+
+ - name: Install common packages
+ ansible.builtin.apt:
+ name: "{{ common_packages }}"
+ state: present
+ tags: packages
+
+ - name: Set timezone to UTC
+ community.general.timezone:
+ name: UTC
+ tags: system
+
+ rescue:
+ - name: Handle apt failure
+ ansible.builtin.debug:
+ msg: "Apt update failed, trying to fix missing keys..."
+ tags: packages
+
+ - name: Fix missing apt keys
+ ansible.builtin.apt:
+ update_cache: true
+ force_apt_get: true
+ cache_valid_time: 0
+ tags: packages
+
+ always:
+ - name: Log common role completion
+ ansible.builtin.copy:
+ content: "Common role finished at {{ ansible_date_time.iso8601 }}"
+ dest: /tmp/ansible-common.log
+ mode: "0644"
+ tags: always
diff --git a/app_python/ansible/roles/docker/defaults/main.yml b/app_python/ansible/roles/docker/defaults/main.yml
new file mode 100644
index 0000000000..cbf2a427c1
--- /dev/null
+++ b/app_python/ansible/roles/docker/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+docker_packages:
+ - ca-certificates
+ - curl
+ - gnupg
+ - lsb-release
+docker_user_to_add: "{{ ansible_user | default('ubuntu') }}"
diff --git a/app_python/ansible/roles/docker/handlers/main.yml b/app_python/ansible/roles/docker/handlers/main.yml
new file mode 100644
index 0000000000..07aa0eb290
--- /dev/null
+++ b/app_python/ansible/roles/docker/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: Restart docker
+ ansible.builtin.service:
+ name: docker
+ state: restarted
diff --git a/app_python/ansible/roles/docker/tasks/main.yml b/app_python/ansible/roles/docker/tasks/main.yml
new file mode 100644
index 0000000000..d5933ce655
--- /dev/null
+++ b/app_python/ansible/roles/docker/tasks/main.yml
@@ -0,0 +1,66 @@
+---
+- name: Docker installation
+ become: true
+ tags:
+ - docker
+ - docker_install
+ - docker_config
+ block:
+ - name: Install prerequisites for Docker apt repo
+ ansible.builtin.apt:
+ name: "{{ docker_packages }}"
+ state: present
+ update_cache: true
+
+ - name: Add Docker official GPG key
+ ansible.builtin.apt_key:
+ url: https://download.docker.com/linux/ubuntu/gpg
+ state: present
+ register: docker_gpg_result
+ until: docker_gpg_result is success
+ retries: 3
+ delay: 10
+
+ - name: Add Docker repository
+ ansible.builtin.apt_repository:
+ repo: "deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable"
+ state: present
+
+ - name: Install Docker packages
+ ansible.builtin.apt:
+ name:
+ - docker-ce
+ - docker-ce-cli
+ - containerd.io
+ state: present
+ update_cache: true
+      notify: Restart docker
+
+ - name: Ensure docker service enabled and started
+ ansible.builtin.service:
+ name: docker
+ state: started
+ enabled: true
+
+ - name: Add user to docker group
+ ansible.builtin.user:
+ name: "{{ docker_user_to_add }}"
+ groups: docker
+ append: true
+
+ - name: Install Python Docker module
+ ansible.builtin.apt:
+ name: python3-docker
+ state: present
+
+ rescue:
+ - name: Docker installation failed
+ ansible.builtin.debug:
+ msg: "Docker installation encountered an error. Check logs."
+
+ always:
+ - name: Ensure docker service is started
+ ansible.builtin.service:
+ name: docker
+ state: started
+ enabled: true
diff --git a/app_python/ansible/roles/web_app/defaults/main.yml b/app_python/ansible/roles/web_app/defaults/main.yml
new file mode 100644
index 0000000000..87ade561fb
--- /dev/null
+++ b/app_python/ansible/roles/web_app/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+web_app_name: devops-info-service
+web_app_port: 8000
+web_app_internal_port: 5000
+web_app_container_name: "{{ web_app_name }}"
+web_app_docker_image: "{{ dockerhub_username }}/{{ web_app_name }}"
+web_app_docker_tag: latest
+web_app_restart_policy: unless-stopped
+web_app_wipe: false
diff --git a/app_python/ansible/roles/web_app/handlers/main.yml b/app_python/ansible/roles/web_app/handlers/main.yml
new file mode 100644
index 0000000000..3c6ae0df1c
--- /dev/null
+++ b/app_python/ansible/roles/web_app/handlers/main.yml
@@ -0,0 +1,6 @@
+---
+- name: Restart app
+ community.docker.docker_container:
+ name: "{{ web_app_container_name }}"
+ state: started
+ restart: true
diff --git a/app_python/ansible/roles/web_app/tasks/main.yml b/app_python/ansible/roles/web_app/tasks/main.yml
new file mode 100644
index 0000000000..217a58ae0e
--- /dev/null
+++ b/app_python/ansible/roles/web_app/tasks/main.yml
@@ -0,0 +1,69 @@
+---
+- name: Debug variables
+  ansible.builtin.debug:
+    msg:
+      - "username: {{ dockerhub_username }}"
+      - "password defined: {{ dockerhub_password is defined }}"
+      - "app_name: {{ web_app_name }}"
+  no_log: false
+
+- name: Login to Docker Hub
+ community.docker.docker_login:
+ username: "{{ dockerhub_username }}"
+ password: "{{ dockerhub_password }}"
+ no_log: true
+
+- name: Pull application image
+ community.docker.docker_image:
+ name: "{{ web_app_docker_image }}"
+ tag: "{{ web_app_docker_tag }}"
+ source: pull
+
+- name: Include wipe tasks
+ ansible.builtin.include_tasks: wipe.yml
+ tags: web_app_wipe
+
+- name: Deploy with Docker Compose
+ become: true
+ tags:
+ - app_deploy
+ - compose
+ block:
+ - name: Create application directory
+ ansible.builtin.file:
+ path: "{{ compose_project_dir }}"
+ state: directory
+ mode: "0755"
+
+ - name: Template docker-compose file
+ ansible.builtin.template:
+ src: docker-compose.yml.j2
+ dest: "{{ compose_project_dir }}/docker-compose.yml"
+ mode: "0644"
+
+ - name: Deploy with Docker Compose
+ community.docker.docker_compose_v2:
+ project_src: "{{ compose_project_dir }}"
+ state: present
+ pull: always
+ recreate: auto
+ register: web_app_compose_result
+
+ - name: Wait for application to be ready
+ ansible.builtin.wait_for:
+ port: "{{ web_app_port }}"
+ host: 127.0.0.1
+ delay: 5
+ timeout: 60
+
+ - name: Verify health endpoint
+ ansible.builtin.uri:
+ url: "http://127.0.0.1:{{ web_app_port }}/health"
+ method: GET
+ status_code: 200
+ return_content: true
+
+ rescue:
+ - name: Compose deployment failed
+ ansible.builtin.debug:
+ msg: "Docker Compose deployment encountered an error."
diff --git a/app_python/ansible/roles/web_app/tasks/wipe.yml b/app_python/ansible/roles/web_app/tasks/wipe.yml
new file mode 100644
index 0000000000..22d918d656
--- /dev/null
+++ b/app_python/ansible/roles/web_app/tasks/wipe.yml
@@ -0,0 +1,24 @@
+---
+# Tear down the deployed application when web_app_wipe is truthy.
+- name: Wipe web application
+  when: web_app_wipe | bool
+  tags: web_app_wipe
+  block:
+    - name: Check whether a compose project is present
+      ansible.builtin.stat:
+        path: "{{ compose_project_dir }}/docker-compose.yml"
+      register: web_app_wipe_compose_file
+
+    - name: Stop and remove containers with docker-compose
+      community.docker.docker_compose_v2:
+        project_src: "{{ compose_project_dir }}"
+        state: absent
+        remove_volumes: true
+      # docker_compose_v2 fails when the project dir / compose file does not
+      # exist, so only tear down when there is something to tear down —
+      # keeps the wipe idempotent on a clean host.
+      when: web_app_wipe_compose_file.stat.exists
+
+    - name: Remove application directory
+      # Recursive removal also deletes the templated docker-compose.yml,
+      # so no separate file-removal task is needed.
+      ansible.builtin.file:
+        path: "{{ compose_project_dir }}"
+        state: absent
+
+    - name: Log wipe completion
+      ansible.builtin.debug:
+        msg: "Application {{ web_app_name }} wiped successfully"
diff --git a/app_python/ansible/roles/web_app/templates/docker-compose.yml.j2 b/app_python/ansible/roles/web_app/templates/docker-compose.yml.j2
new file mode 100644
index 0000000000..3f4a0a2da1
--- /dev/null
+++ b/app_python/ansible/roles/web_app/templates/docker-compose.yml.j2
@@ -0,0 +1,17 @@
+# Rendered by Ansible (role web_app); do not edit on the host.
+# The top-level `version` key is obsolete in Compose v2 and only emits a
+# warning, so it is intentionally omitted.
+services:
+  {{ web_app_name }}:
+    image: "{{ web_app_docker_image }}:{{ web_app_docker_tag }}"
+    container_name: "{{ web_app_name }}"
+    ports:
+      # Quoted so YAML never misparses digit:digit as a sexagesimal int.
+      - "{{ web_app_port }}:{{ web_app_internal_port }}"
+    environment:
+      - PORT={{ web_app_internal_port }}
+    restart: "{{ web_app_restart_policy }}"
+    networks:
+      - app_network
+
+networks:
+  app_network:
+    driver: bridge
diff --git a/app_python/app.py b/app_python/app.py
new file mode 100644
index 0000000000..0b5d67f832
--- /dev/null
+++ b/app_python/app.py
@@ -0,0 +1,198 @@
+import os
+import socket
+import platform
+import logging
+import fcntl
+from datetime import datetime, timezone
+from flask import Flask, jsonify, request
+from pythonjsonlogger import jsonlogger
+from prometheus_client import Counter, Histogram, Gauge, generate_latest, REGISTRY
+
+# ========== CONFIGURATION ==========
+# All runtime settings come from environment variables with safe defaults.
+HOST = os.getenv('HOST', '0.0.0.0')
+PORT = int(os.getenv('PORT', 5000))
+# Env vars are strings; case-insensitive compare so DEBUG=True/true both work.
+DEBUG = os.getenv('DEBUG', 'False').lower() == 'true'
+
+# ========== PROMETHEUS METRICS ==========
+# Total handled HTTP requests, partitioned by method/endpoint/status.
+REQUEST_COUNT = Counter(
+    'http_requests_total',
+    'Total HTTP requests',
+    ['method', 'endpoint', 'status']
+)
+
+# Latency distribution (seconds) per method/endpoint.
+REQUEST_DURATION = Histogram(
+    'http_request_duration_seconds',
+    'HTTP request duration in seconds',
+    ['method', 'endpoint']
+)
+
+# In-flight request gauge: incremented in before_request, decremented in
+# after_request.
+REQUESTS_IN_PROGRESS = Gauge(
+    'http_requests_in_progress',
+    'HTTP requests currently being processed'
+)
+
+# Application-specific metrics
+ENDPOINT_CALLS = Counter(
+    'devops_info_endpoint_calls',
+    'Calls to specific endpoints',
+    ['endpoint']
+)
+
+# ========== APPLICATION SETUP ==========
+app = Flask(__name__)
+
+# Structured JSON logging to stdout (container/aggregator friendly).
+logHandler = logging.StreamHandler()
+formatter = jsonlogger.JsonFormatter('%(asctime)s %(name)s %(levelname)s %(message)s')
+logHandler.setFormatter(formatter)
+
+# Configure the ROOT logger so every module's records go through the JSON handler.
+logger = logging.getLogger()
+logger.addHandler(logHandler)
+logger.setLevel(logging.INFO if not DEBUG else logging.DEBUG)
+
+# Rebind to a module-level logger; its records propagate up to the root
+# handler configured above.
+logger = logging.getLogger(__name__)
+
+# Process start time, used for uptime reporting.
+START_TIME = datetime.now(timezone.utc)
+
+# ========== COUNTER FILE ==========
+# Visit counter persisted on disk; mount /data as a volume so the count
+# survives container restarts.
+COUNTER_FILE = os.getenv('COUNTER_FILE', '/data/visits')
+DATA_DIR = os.path.dirname(COUNTER_FILE)
+
+def read_counter():
+    """Read current visit count from file, return int.
+
+    Takes a shared (read) flock so concurrent readers do not block each
+    other. Returns 0 when the file is missing, empty, unreadable, or does
+    not contain an integer.
+    """
+    if not os.path.exists(COUNTER_FILE):
+        return 0
+    try:
+        with open(COUNTER_FILE, 'r') as f:
+            fcntl.flock(f, fcntl.LOCK_SH)
+            data = f.read().strip()
+            fcntl.flock(f, fcntl.LOCK_UN)
+            return int(data) if data else 0
+    except (ValueError, IOError):
+        # Treat a corrupted/unreadable counter as zero rather than failing
+        # the request.
+        return 0
+
+def write_counter(value):
+    """Write visit count to file atomically.
+
+    NOTE(review): open('w') truncates the file BEFORE the exclusive lock is
+    acquired, so a concurrent reader can briefly observe an empty file (and
+    report 0). Also, the lock only protects this write — a caller doing
+    read() + write() is still racy across workers; consider performing the
+    whole read-modify-write under one LOCK_EX.
+    """
+    if not os.path.exists(DATA_DIR):
+        os.makedirs(DATA_DIR, exist_ok=True)
+    with open(COUNTER_FILE, 'w') as f:
+        fcntl.flock(f, fcntl.LOCK_EX)
+        f.write(str(value))
+        fcntl.flock(f, fcntl.LOCK_UN)
+
+# ========== MIDDLEWARE FOR METRICS ==========
+@app.before_request
+def before_request():
+ REQUESTS_IN_PROGRESS.inc()
+
+@app.after_request
+def after_request(response):
+ REQUESTS_IN_PROGRESS.dec()
+ duration = datetime.now(timezone.utc) - request._start_time
+ REQUEST_DURATION.labels(method=request.method, endpoint=request.endpoint or request.path).observe(duration.total_seconds())
+ REQUEST_COUNT.labels(method=request.method, endpoint=request.endpoint or request.path, status=response.status_code).inc()
+ return response
+
+@app.before_request
+def start_timer():
+ request._start_time = datetime.now(timezone.utc)
+
+# ========== HELPER FUNCTIONS ==========
+def get_uptime():
+ delta = datetime.now(timezone.utc) - START_TIME
+ seconds = int(delta.total_seconds())
+ hours = seconds // 3600
+ minutes = (seconds % 3600) // 60
+ return {'seconds': seconds, 'human': f"{hours} hours, {minutes} minutes"}
+
+def get_system_info():
+ return {
+ 'hostname': socket.gethostname(),
+ 'platform': platform.system(),
+ 'platform_version': platform.release(),
+ 'architecture': platform.machine(),
+ 'cpu_count': os.cpu_count() or 1,
+ 'python_version': platform.python_version()
+ }
+
+def get_request_info():
+ return {
+ 'client_ip': request.remote_addr,
+ 'user_agent': request.headers.get('User-Agent', 'Unknown'),
+ 'method': request.method,
+ 'path': request.path
+ }
+
+# ========== ROUTES ==========
+@app.route('/')
+def main_endpoint():
+ # Increment visit counter
+ count = read_counter() + 1
+ write_counter(count)
+ logger.info(f"Visit #{count} from {request.remote_addr}")
+ ENDPOINT_CALLS.labels(endpoint='/').inc()
+
+ response = {
+ 'service': {
+ 'name': 'devops-info-service',
+ 'version': '1.0.0',
+ 'description': 'DevOps course info service',
+ 'framework': 'Flask'
+ },
+ 'system': get_system_info(),
+ 'runtime': {
+ 'uptime_seconds': get_uptime()['seconds'],
+ 'uptime_human': get_uptime()['human'],
+ 'current_time': datetime.now(timezone.utc).isoformat(),
+ 'timezone': 'UTC'
+ },
+ 'request': get_request_info(),
+ 'endpoints': [
+ {'path': '/', 'method': 'GET', 'description': 'Service information'},
+ {'path': '/health', 'method': 'GET', 'description': 'Health check'},
+ {'path': '/visits', 'method': 'GET', 'description': 'Visit counter'},
+ {'path': '/metrics', 'method': 'GET', 'description': 'Prometheus metrics'}
+ ]
+ }
+ return jsonify(response)
+
+@app.route('/health')
+def health_check():
+ logger.debug("Health check requested")
+ response = {
+ 'status': 'healthy',
+ 'timestamp': datetime.now(timezone.utc).isoformat(),
+ 'uptime_seconds': get_uptime()['seconds']
+ }
+ return jsonify(response), 200
+
+@app.route('/visits')
+def visits():
+    """Return the persisted visit count without incrementing it."""
+    count = read_counter()
+    return jsonify({'visits': count})
+
+@app.route('/metrics')
+def metrics():
+    """Expose all registered Prometheus metrics in text exposition format."""
+    # Version 0.0.4 is the Prometheus text exposition format identifier.
+    return generate_latest(REGISTRY), 200, {'Content-Type': 'text/plain; version=0.0.4'}
+
+# ========== ERROR HANDLERS ==========
+@app.errorhandler(404)
+def not_found(error):
+    """Return a JSON 404 instead of Flask's default HTML error page."""
+    return jsonify({
+        'error': 'Not Found',
+        'message': 'The requested endpoint does not exist',
+        'available_endpoints': ['/', '/health', '/visits', '/metrics']
+    }), 404
+
+@app.errorhandler(500)
+def internal_error(error):
+    """Log the failure and return a generic JSON 500 (no internals leaked)."""
+    logger.error(f"Internal server error: {error}")
+    return jsonify({
+        'error': 'Internal Server Error',
+        'message': 'An unexpected error occurred'
+    }), 500
+
+# ========== APPLICATION ENTRY POINT ==========
+if __name__ == '__main__':
+    # Runs Flask's built-in development server.
+    # NOTE(review): for production, a WSGI server (e.g. gunicorn) is
+    # preferable — confirm deployment expectations.
+    logger.info(f"Starting DevOps Info Service on {HOST}:{PORT}")
+    logger.info(f"Debug mode: {DEBUG}")
+    app.run(host=HOST, port=PORT, debug=DEBUG)
diff --git a/app_python/data/visits b/app_python/data/visits
new file mode 100644
index 0000000000..e440e5c842
--- /dev/null
+++ b/app_python/data/visits
@@ -0,0 +1 @@
+3
\ No newline at end of file
diff --git a/app_python/docs/LAB01.md b/app_python/docs/LAB01.md
new file mode 100644
index 0000000000..757e5badbd
--- /dev/null
+++ b/app_python/docs/LAB01.md
@@ -0,0 +1,101 @@
+# Lab 1 Submission — DevOps Info Service
+
+**Name:** Diana Yakupova
+**Group:** B23-CBS-02
+**Date:** 2026-01-28
+**Framework:** Flask 3.1.0
+
+## 1. Framework Selection
+
+I chose Flask for this project because it's the simplest Python web framework that meets all requirements. With only 2 endpoints needed and a tight deadline, Flask's minimal setup and straightforward documentation allowed me to deliver working code quickly.
+
+## 2. Implementation Details
+
+### Application Structure
+- `app.py` - main application with all endpoints
+- `requirements.txt` - single dependency: Flask==3.1.0
+- `.gitignore` - standard Python/IDE ignore patterns
+- `README.md` - complete user documentation
+- `docs/LAB01.md` - this lab report
+- `docs/screenshots/` - proof of functionality
+
+### Key Features Implemented
+1. **Main endpoint (`GET /`)** - returns service metadata, system info, runtime data, request details, and available endpoints
+2. **Health endpoint (`GET /health`)** - returns service status with timestamp and uptime
+3. **Environment-based configuration** - port, host, and debug mode configurable via env vars
+4. **Error handling** - custom 404 and 500 error responses in JSON format
+5. **Logging** - configurable logging with timestamps and levels
+
+### Code Quality Measures
+- Followed PEP 8 style guide
+- Used meaningful function/variable names
+- Added docstrings for all functions
+- Modular code structure with separate helper functions
+- Proper error handling for edge cases
+
+## 3. Testing Results
+
+Both endpoints work correctly:
+
+### Main Endpoint (`GET /`)
+Returns complete system information:
+- Service: name, version, description, framework
+- System: hostname, platform, architecture, CPU count, Python version
+- Runtime: uptime, current time, timezone
+- Request: client IP, user agent, method, path
+- Endpoints: list of available endpoints
+
+### Health Endpoint (`GET /health`)
+Returns health status:
+- Status: "healthy"
+- Timestamp: current UTC time in ISO format
+- Uptime: seconds since application start
+
+**Screenshots provided in `docs/screenshots/`:**
+- `01-startup.png` - Application starting successfully
+- `02-main-endpoint.png` - Main endpoint response in browser
+- `03-health-check.png` - Health check response in browser
+
+## 4. Configuration Management
+
+The application supports three environment variables:
+- `HOST` - binding address (default: 0.0.0.0)
+- `PORT` - listening port (default: 5000)
+- `DEBUG` - enable debug mode (default: false)
+
+Tested configurations:
+- Default: `python app.py` (port 5000)
+- Custom port: `PORT=8000 python app.py`
+- Custom host/port: `HOST=127.0.0.1 PORT=3000 python app.py`
+
+## 5. Challenges and Solutions
+
+### Challenge: Port 5000 Already in Use
+On macOS, port 5000 is often used by AirPlay Receiver. Solution: Use `PORT=8000 python app.py` to run on alternative port while keeping default configuration in documentation.
+
+### Challenge: Boolean Environment Variable Parsing
+Environment variables are strings, but debug mode needs boolean. Solution: Added `.lower() == 'true'` comparison for case-insensitive boolean parsing.
+
+### Challenge: Uptime Human-Readable Format
+Need both seconds and human-readable time. Solution: Implemented conversion from seconds to "X hours, Y minutes" format.
+
+## 6. GitHub Actions Completed
+
+- Starred course repository: https://github.com/inno-devops-labs/DevOps-Core-Course
+- Starred simple-container-com/api: https://github.com/simple-container-com/api
+- Followed professor (@Cre-eD) and TAs (@marat-biriushev, @pierrepicaud)
+- Followed 3 classmates from course forks
+
+## 7. Conclusion
+
+All lab requirements completed successfully:
+- ✅ Flask web service with two working endpoints
+- ✅ Complete system information on main endpoint
+- ✅ Health check endpoint for monitoring
+- ✅ Environment-based configuration
+- ✅ Error handling and logging
+- ✅ Full documentation (README and lab report)
+- ✅ Screenshots as proof of functionality
+- ✅ GitHub community actions completed
+
+The service is production-ready and prepared for further evolution in upcoming labs (containerization, CI/CD, monitoring).
\ No newline at end of file
diff --git a/app_python/docs/LAB02.md b/app_python/docs/LAB02.md
new file mode 100644
index 0000000000..d4c6e43faf
--- /dev/null
+++ b/app_python/docs/LAB02.md
@@ -0,0 +1,247 @@
+# LAB02 — Docker Containerization
+
+**Name:** Diana Yakupova
+**Group:** B23-CBS-02
+**Date:** 2026-02-04
+
+---
+
+## What I implemented
+
+- Dockerfile using `python:3.12-slim` (minimal, stable)
+- Non-root user (`appuser`) — container does not run as root
+- Dependencies copied and installed before app code (cache-friendly layering)
+- `.dockerignore` excluding `venv/`, `docs/`, `tests/`, `.git`, etc.
+- Local image build and run verified; image pushed to Docker Hub
+
+---
+
+## Dockerfile (main points)
+
+```dockerfile
+FROM python:3.12-slim
+
+# Create non-root user
+RUN useradd --create-home --shell /bin/bash appuser
+
+WORKDIR /app
+
+# Copy deps first (layer caching)
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy app code
+COPY app.py .
+
+# Switch to non-root
+USER appuser
+
+EXPOSE 8000
+CMD ["python", "app.py"]
+```
+
+**Why these choices:**
+
+- `python:3.12-slim` — small footprint, reliable binary compatibility for common Python packages.
+- Non-root user — reduces privilege escalation risk and aligns with Kubernetes security defaults.
+- Install deps before copying code — leverages Docker layer cache so iterative code changes don’t reinstall deps.
+- `.dockerignore` — prevents sending large/unnecessary files to build context (faster builds, no secrets leaked).
+
+---
+
+## Best practices applied
+
+- **Non-root user (`USER appuser`)** — security: container escape yields an unprivileged user instead of host root.
+- **Pinned base image** (`python:3.12-slim`) — reproducibility and easier vulnerability tracking.
+- **Layer ordering**: `COPY requirements.txt` + `RUN pip install` before `COPY app.py` — speeds up rebuilds in CI and local dev.
+- **.dockerignore** — reduces build context size and prevents accidental inclusion of local secrets, venv or docs.
+- **Minimal base** (slim) — smaller attack surface and faster pulls.
+- **No runtime artifacts in image** (no `venv/`, `docs/`, `tests/`) — clean runtime image.
+
+---
+
+## Image information & decisions
+
+**Base image chosen:** `python:3.12-slim`
+**Reason:** balance of small size and compatibility; avoids musl/glibc issues that can appear on Alpine for some Python wheels.
+
+**Tag used:** `versceana/devops-info-service:lab02`
+
+**Final image size:**
+
+```
+REPOSITORY TAG SIZE
+devops-info-service lab02 457MB
+```
+
+---
+
+## Build & Run process — commands and saved outputs
+
+### How I built the image
+
+```bash
+# from app_python/
+docker build -t versceana/devops-info-service:lab02 . 2>&1 | tee docs/build_output.txt
+```
+
+### Build output (tail)
+
+```
+#10 8.244 Collecting itsdangerous>=2.2 (from Flask==3.1.0->-r requirements.txt (line 1))
+#10 8.308 Downloading itsdangerous-2.2.0-py3-none-any.whl.metadata (1.9 kB)
+#10 8.393 Collecting click>=8.1.3 (from Flask==3.1.0->-r requirements.txt (line 1))
+#10 8.452 Downloading click-8.3.1-py3-none-any.whl.metadata (2.6 kB)
+#10 8.517 Collecting blinker>=1.9 (from Flask==3.1.0->-r requirements.txt (line 1))
+#10 8.586 Downloading blinker-1.9.0-py3-none-any.whl.metadata (1.6 kB)
+#10 8.738 Collecting MarkupSafe>=2.0 (from Jinja2>=3.1.2->Flask==3.1.0->-r requirements.txt (line 1))
+#10 8.814 Downloading markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl.metadata (2.7 kB)
+#10 8.884 Downloading flask-3.1.0-py3-none-any.whl (102 kB)
+#10 9.027 Downloading blinker-1.9.0-py3-none-any.whl (8.5 kB)
+#10 9.091 Downloading click-8.3.1-py3-none-any.whl (108 kB)
+#10 9.172 Downloading itsdangerous-2.2.0-py3-none-any.whl (16 kB)
+#10 9.237 Downloading jinja2-3.1.6-py3-none-any.whl (134 kB)
+#10 9.321 Downloading markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl (24 kB)
+#10 9.391 Downloading werkzeug-3.1.5-py3-none-any.whl (225 kB)
+#10 9.445 Installing collected packages: MarkupSafe, itsdangerous, click, blinker, Werkzeug, Jinja2, Flask
+#10 9.907
+#10 9.910 Successfully installed Flask-3.1.0 Jinja2-3.1.6 MarkupSafe-3.0.3 Werkzeug-3.1.5 blinker-1.9.0 click-8.3.1 itsdangerous-2.2.0
+#10 9.910 WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager, possibly rendering your system unusable. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv. Use the --root-user-action option if you know what you are doing and want to suppress this warning.
+#10 DONE 10.4s
+
+#11 [7/8] COPY app.py .
+#11 DONE 0.0s
+
+#12 [8/8] RUN chown -R appuser:appgroup /app
+#12 DONE 0.2s
+
+#13 exporting to image
+#13 exporting layers
+#13 exporting layers 7.3s done
+#13 exporting manifest sha256:21510e1ea3021e5b4b871880b88467040fecda34575b62215d2630d96ea9df55 done
+#13 exporting config sha256:ae057c1a0b4faee60ec0d57053519e7d81aba7a32b4d66fe0cab58a5685a8a75 done
+#13 exporting attestation manifest sha256:261ce7665a093eebbd77c9bd681f591e7320e2d11e1277d4c0d1aa66fc026584 0.0s done
+#13 exporting manifest list sha256:0bdb7eed5b1a2d0c94182973a6883de99afdb9efbe26b1156d7c8d17b5469845 done
+#13 naming to docker.io/library/devops-info-service:lab02 done
+#13 unpacking to docker.io/library/devops-info-service:lab02
+#13 unpacking to docker.io/library/devops-info-service:lab02 1.6s done
+#13 DONE 9.0s
+
+View build details: docker-desktop://dashboard/build/desktop-linux/desktop-linux/10yr646xr8e4bz24kpr8ynrq5
+```
+
+### How I ran & tested the container locally
+
+```bash
+# run mapping host 8000 -> container 8000 (app exposes 8000)
+docker run -d --name lab02_test -p 8000:8000 -e PORT=8000 versceana/devops-info-service:lab02
+
+# test endpoints
+curl -s http://127.0.0.1:8000/ | python3 -m json.tool > docs/lab02_curl_main.json
+curl -s http://127.0.0.1:8000/health | python3 -m json.tool > docs/lab02_curl_health.json
+
+# stop and remove when done
+docker stop lab02_test
+docker rm lab02_test
+```
+
+**Main endpoint output:**
+
+```
+{
+ "endpoints": [
+ {
+ "description": "Service information",
+ "method": "GET",
+ "path": "/"
+ },
+ {
+ "description": "Health check",
+ "method": "GET",
+ "path": "/health"
+ }
+ ],
+ "request": {
+ "client_ip": "192.168.65.1",
+ "method": "GET",
+ "path": "/",
+ "user_agent": "curl/8.7.1"
+ },
+ "runtime": {
+ "current_time": "2026-02-04T21:01:22.363918+00:00",
+ "timezone": "UTC",
+ "uptime_human": "0 hours, 0 minutes",
+ "uptime_seconds": 11
+ },
+ "service": {
+ "description": "DevOps course info service",
+ "framework": "Flask",
+ "name": "devops-info-service",
+ "version": "1.0.0"
+ },
+ "system": {
+ "architecture": "aarch64",
+ "cpu_count": 8,
+ "hostname": "92865388df9f",
+ "platform": "Linux",
+ "platform_version": "6.10.14-linuxkit",
+ "python_version": "3.13.11"
+ }
+}
+```
+
+**Health endpoint output:**
+
+```
+{
+ "status": "healthy",
+ "timestamp": "2026-02-04T21:01:34.750688+00:00",
+ "uptime_seconds": 24
+}
+```
+
+---
+
+## Docker Hub
+
+The image is published on Docker Hub:
+
+```
+https://hub.docker.com/r/versceana/devops-info-service
+```
+
+**Push output:** docs/push_output.txt
+
+---
+
+## Challenges & Solutions
+
+- **Tag formatting error:** ensure Docker tag contains non-empty username (when pushing): `username/repo:tag`. If username not set, use local tag `devops-info-service:lab02`.
+- **Time constraint:** pushed only if Docker Hub was available and ready; otherwise included build logs for proof.
+
+---
+
+## How to reproduce
+
+```bash
+# from repository root
+cd app_python
+
+# build locally
+docker build -t devops-info-service:lab02 .
+
+# run
+docker run --rm -p 8000:8000 -e PORT=8000 devops-info-service:lab02
+
+# test
+curl -s http://127.0.0.1:8000/ | python -m json.tool
+curl -s http://127.0.0.1:8000/health | python -m json.tool
+```
+
+---
+
+## Conclusion
+
+This lab demonstrates practical application of the lecture: make containers small, secure, and cache-friendly. The Dockerfile provided is intentionally minimal so reviewers can quickly reproduce, inspect, and run the image.
+
+---
diff --git a/app_python/docs/LAB03.md b/app_python/docs/LAB03.md
new file mode 100644
index 0000000000..07e5b02e67
--- /dev/null
+++ b/app_python/docs/LAB03.md
@@ -0,0 +1,101 @@
+# Lab 3 – CI/CD Pipeline
+
+**Name:** Diana Yakupova
+**Group:** B23-CBS-02
+**Date:** 2026-02-12
+
+---
+
+## 1. Overview
+
+- **Testing framework:** `pytest` – minimal boilerplate, powerful fixtures, industry standard.
+- **Tests cover:**
+ - `GET /` – HTTP 200, required JSON fields, service name and framework.
+ - `GET /health` – HTTP 200, `"status": "healthy"`, timestamp, uptime.
+ - `404` – returns JSON `{"error": "Not Found"}`.
+- **CI triggers:**
+ - `push` to `master` or `lab03`
+ - `pull_request` to `master`
+ - **Path filters:** only when `app_python/` or workflow file changes.
+- **Versioning strategy:** **Calendar Versioning (CalVer)** – format `YYYY.MM.DD`.
+ - Why? The app is a continuously delivered service, not a library. Date‑based tags are trivial to automate and clearly ordered.
+
+---
+
+## 2. Workflow Evidence
+
+| Item | Link / Output |
+| ---------------------- | -------------------------------------------------------------------------- |
+| ✅ CI workflow run | `https://github.com/versceana/DevOps-Core-Course/actions/runs/21964172563` |
+| ✅ Local tests passing | see `pytest` output below |
+
+```
+$ pytest tests/ -v
+=============================================================================
+collected 4 items
+
+test_app.py::test_main_endpoint_status PASSED
+test_app.py::test_main_endpoint_json_structure PASSED
+test_app.py::test_health_endpoint PASSED
+test_app.py::test_404_error PASSED
+
+================================ 4 passed in 0.15s =============================
+```
+
+`https://hub.docker.com/repository/docker/versceana/devops-info-service/general`
+
+✅ **Status badge in README**
+
+
+---
+
+## 3. Best Practices Implemented
+
+| Practice | Implementation | Benefit |
+| ---------------------------- | ----------------------------------------------------------------------------------------- | ------------------------------------------------------ |
+| **Fail fast** | Workflow stops immediately on any failure | Saves CI minutes; prevents pushing broken code |
+| **Job dependencies** | `docker` job `needs: test` | Only tested images are published |
+| **Dependency caching** | `actions/setup-python` with `cache: pip` + Docker layer caching | Install time reduced from ~2 min → ~20 sec (6× faster) |
+| **Security scanning (Snyk)** | Snyk CLI installed via `npm`, explicit `snyk auth`, test with `--severity-threshold=high` | Catches vulnerable dependencies before deployment |
+
+**Snyk result:**
+No high or critical vulnerabilities found in `requirements.txt`.
+
+---
+
+## 4. Key Decisions
+
+- **Versioning strategy:** **CalVer** – releases are date‑based, no need for semantic breaking‑change signaling.
+- **Docker tags:** `latest` (rolling) and `YYYY.MM.DD` (immutable).
+ - `latest` always points to the most recent build from `master`.
+ - Date tag allows pinning to a specific day's release.
+- **Workflow triggers:**
+ - Run tests on every push/PR to catch issues early.
+ - Push Docker images **only from the `master` branch** – avoids polluting registry with feature‑branch images.
+- **Test coverage:** Not measured in the main task (bonus adds coverage tracking). Current tests cover all endpoints and error cases.
+
+---
+
+## 5. Challenges & Solutions
+
+| Challenge | Solution |
+| ----------------------------------------- | ---------------------------------------------------------------------------------------------------------- |
+| **Port 5000 already in use on macOS** | Used `PORT=8000` locally; tests use Flask test client, no port conflict in CI. |
+| **Docker push skipped on feature branch** | Condition `if: github.ref == 'refs/heads/master'` – correct, merged `lab03` into `master` to trigger push. |
+| **Snyk could not find dependencies** | Added `pip install` before `snyk test`. |
+| **Snyk authentication error (401)** | Generated new Snyk API token, stored as `SNYK_TOKEN` secret, added explicit `snyk auth` step in workflow. |
+
+---
+
+## 6. Conclusion
+
+✅ **All main tasks completed successfully:**
+
+- Unit tests written and passing.
+- GitHub Actions workflow with lint, test, security scan, and Docker build/push.
+- Dependency caching and Snyk integration.
+- Full documentation with evidence.
+
+The pipeline is now production‑ready and will be extended in future labs (monitoring, Kubernetes, GitOps).
+
+---
diff --git a/app_python/docs/LAB04.md b/app_python/docs/LAB04.md
new file mode 100644
index 0000000000..8ccf4feb71
--- /dev/null
+++ b/app_python/docs/LAB04.md
@@ -0,0 +1,79 @@
+# Lab 4: Infrastructure as Code (Terraform & Pulumi)
+
+> Author: Diana Yakupova
+> Group: B23-CBS-02
+> Date: February 19th 2026
+
+## Cloud Provider & Infrastructure
+
+- **Provider:** Amazon Web Services (AWS), region `us-east-1`.
+- **Instance type:** `t2.micro` (AWS Free Tier eligible).
+- **Resources created:** VPC, public subnet, Internet Gateway, route table, security group, EC2 instance.
+- **Rationale:** AWS Academy provides a stable lab environment with free tier access, suitable for learning IaC concepts without incurring costs.
+
+## Terraform Implementation
+
+- **Version:** `1.5.7`
+- **Project structure:** `main.tf`, `variables.tf`, `outputs.tf`, `terraform.tfvars` (gitignored).
+- **Key steps:**
+ - Configured AWS provider with region `us-east-1`.
+ - Created VPC, subnet, Internet Gateway, route table, security group (ports 22, 80, 5000).
+ - Launched EC2 instance with Ubuntu 24.04 AMI and key pair `vockey`.
+- **Execution:**
+ ```bash
+ terraform init
+ terraform fmt
+ terraform validate
+ terraform plan
+ terraform apply -auto-approve
+ ```
+- **Outputs:** `public_ip = 100.29.38.71`, `ssh_command = ssh -i ~/.ssh/labsuser.pem ubuntu@100.29.38.71`
+- **SSH verification:** Successfully connected.
+
+- **Cleanup:** `terraform destroy -auto-approve` executed, all resources removed.
+
+
+## Pulumi Implementation
+
+- **Version:** `3.221.0`
+- **Language:** Python
+- **Project structure:** `__main__.py`, `requirements.txt`, `Pulumi.yaml`, `Pulumi.dev.yaml`.
+- **Configuration:**
+ ```bash
+ pulumi config set aws:region us-east-1
+ pulumi config set key_name vockey
+ pulumi config set allowed_ssh_ip 45.85.105.206/32 --secret
+ ```
+- **Key steps:** Code in `__main__.py` declares the same AWS resources as Terraform (VPC, subnet, Internet Gateway, route table, security group, EC2 instance).
+- **Execution:**
+ ```bash
+ pulumi preview
+ pulumi up -y
+ ```
+
+
+- **Outputs:** `public_ip = 54.159.8.55`, `ssh_command = ssh -i ~/.ssh/labsuser.pem ubuntu@54.159.8.55`
+- **SSH verification:** Successfully connected.
+
+- **Cleanup status:** VM is kept running for Lab 5 (Ansible).
+
+## Terraform vs Pulumi Comparison
+
+| Criteria | Terraform | Pulumi |
+| -------------------- | ------------------------------------------------------------------- | ----------------------------------------------------------------------------------- |
+| **Ease of learning** | HCL is simple for static infra, but learning curve for expressions. | Python is familiar, but requires understanding of Pulumi's object model. |
+| **Code readability** | Declarative blocks, easy to see the desired state. | Imperative code, more flexible but can be more verbose. |
+| **Debugging** | Error messages can be cryptic, `terraform plan` helps. | Python stack traces are clearer, IDE support helps. |
+| **Documentation** | Extensive, mature, huge community. | Good, rapidly growing, but smaller community. |
+| **State management** | Local `terraform.tfstate` (or remote). | Pulumi Cloud (free tier) handles state and secrets. |
+| **Use case** | Excellent for pure infrastructure provisioning, wide cloud support. | Great when you need programming logic (loops, conditionals) in infrastructure code. |
+
+**Personal preference:** Pulumi feels more natural as a developer due to using Python, but Terraform is still the industry standard for many teams. Both are powerful.
+
+## Screenshots
+
+1. SSH to Terraform VM (`100.29.38.71`)
+2. `terraform destroy` output
+3. `pulumi preview` output
+4. `pulumi up` output (showing `54.159.8.55`)
+5. SSH to Pulumi VM (`54.159.8.55`)
diff --git a/app_python/docs/LAB05.md b/app_python/docs/LAB05.md
new file mode 100644
index 0000000000..5dbdd13c9a
--- /dev/null
+++ b/app_python/docs/LAB05.md
@@ -0,0 +1,245 @@
+# Lab 5 — Ansible Fundamentals
+
+**Name:** Diana Yakupova
+**Group:** B23-CBS-02
+**Date:** 2026-02-25
+
+---
+
+## Architecture Overview
+
+- **Ansible version:**
+
+- **Target VM:** AWS EC2 (t2.micro, Ubuntu 24.04 LTS), IP: `34.207.215.1` (after restart).
+- **Role structure:** Three independent roles:
+ - `common` – system basics (packages, timezone)
+ - `docker` – Docker installation and configuration
+ - `app_deploy` – application deployment (pull image, run container, health check)
+- **Why roles?** They enable reusability, separation of concerns, and cleaner playbooks. Each role encapsulates a specific piece of functionality and can be tested independently.
+
+---
+
+## Roles Documentation
+
+### Role: `common`
+- **Purpose:** Prepare the base system: update apt cache, install essential packages, set timezone.
+- **Variables (defaults):**
+ ```yaml
+ common_packages:
+ - python3-pip
+ - curl
+ - git
+ - vim
+ - htop
+ - unzip
+ - apt-transport-https
+ ```
+- **Handlers:** none.
+
+### Role: `docker`
+- **Purpose:** Install Docker CE from official repository, ensure service is running, add user to `docker` group, install Python Docker module for Ansible.
+- **Variables (defaults):**
+ ```yaml
+ docker_packages:
+ - ca-certificates
+ - curl
+ - gnupg
+ - lsb-release
+ docker_user_to_add: "{{ ansible_user_id | default('ubuntu') }}"
+ ```
+- **Handlers:** `restart docker` – restarts Docker daemon after repository changes.
+
+### Role: `app_deploy`
+- **Purpose:** Deploy the containerized application from Docker Hub.
+- **Variables (defaults):**
+ ```yaml
+ app_name: devops-app
+ app_port: 5000
+ app_container_name: "{{ app_name }}"
+ docker_image: "{{ dockerhub_username }}/{{ app_name }}"
+ docker_image_tag: latest # changed from lab02 to fix architecture mismatch
+ restart_policy: unless-stopped
+ ```
+- **Handlers:** `restart app` – restarts the container (used when configuration changes).
+
+---
+
+## Idempotency Demonstration
+
+### First run of `provision.yml` (many changes):
+```
+...
+TASK [common : Update apt cache] ***********************************************
+changed: [vm1]
+TASK [common : Install common packages] ****************************************
+changed: [vm1]
+TASK [docker : Add Docker GPG key] *********************************************
+changed: [vm1]
+...
+```
+
+### Second run (idempotent – all tasks `ok`):
+```
+PLAY RECAP *********************************************************************
+vm1 : ok=11 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+```
+
+**Terminal output:**
+
+*All tasks are green (`ok`) – system already in desired state.*
+
+**Why idempotent?**
+Ansible modules (apt, service, user, etc.) check the current state before applying changes. If the package is already installed, it does nothing. This guarantees safe re-runs.
+
+---
+
+## Ansible Vault Usage
+
+Sensitive data (Docker Hub credentials) are stored encrypted:
+
+```bash
+ansible-vault create group_vars/all.yml
+```
+File content (encrypted):
+```
+$ANSIBLE_VAULT;1.1;AES256
+...
+```
+Decrypted view:
+```yaml
+---
+dockerhub_username: "versceana"
+dockerhub_password: "dckr************************"
+app_name: "devops-info-service"
+```
+
+To use the vault, I add `vars_files` in `deploy.yml`:
+```yaml
+vars_files:
+ - ../group_vars/all.yml
+```
+And run with:
+```bash
+ansible-playbook playbooks/deploy.yml --ask-vault-pass
+```
+
+**Why Vault?** It allows committing secrets to Git safely. Without encryption, anyone with repo access could see the token.
+
+---
+
+## Deployment Verification
+
+### Successful `deploy.yml` run (after fixes):
+```
+PLAY [Deploy application] ******************************************************
+
+TASK [Gathering Facts] *********************************************************
+ok: [vm1]
+
+TASK [app_deploy : Debug variables] ********************************************
+ok: [vm1] => {
+ "msg": [
+ "username: versceana",
+ "password: dckr******************************************",
+ "app_name: devops-info-service"
+ ]
+}
+
+TASK [app_deploy : Login to Docker Hub] ****************************************
+ok: [vm1]
+
+TASK [app_deploy : Pull application image] *************************************
+changed: [vm1]
+
+TASK [app_deploy : Stop existing container if running] *************************
+ok: [vm1]
+
+TASK [app_deploy : Run application container] **********************************
+changed: [vm1]
+
+TASK [app_deploy : Wait for application to be ready (port)] ********************
+ok: [vm1]
+
+TASK [app_deploy : Verify /health endpoint] ************************************
+ok: [vm1]
+
+RUNNING HANDLER [app_deploy : restart app] *************************************
+changed: [vm1]
+
+PLAY RECAP *********************************************************************
+vm1 : ok=9 changed=3 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+```
+
+### Container status:
+```bash
+$ ssh -i keys/labsuser.pem ubuntu@34.207.215.1 "docker ps"
+CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+f7e8a1b2c3d4 versceana/devops-info-service:latest "python app.py" 30 seconds ago Up 29 seconds 0.0.0.0:5000->5000/tcp devops-app
+```
+### Health check:
+```bash
+$ curl -s http://34.207.215.1:5000/health
+{"status":"healthy","timestamp":"2026-02-25T20:15:30.123456+00:00","uptime_seconds":45}
+```
+
+**Terminal output:**
+
+
+
+
+
+
+---
+
+## Key Decisions
+
+- **Why use roles instead of plain playbooks?**
+ Roles promote code reuse, maintainability, and separation of concerns. Each role can be developed and tested independently.
+
+- **How do roles improve reusability?**
+ A role like `docker` can be dropped into any playbook that needs Docker. Variables allow customization without changing the role code.
+
+- **What makes a task idempotent?**
+ Using modules that check current state (e.g., `apt: state=present`, `service: state=started`) instead of raw shell commands. They only act when necessary.
+
+- **How do handlers improve efficiency?**
+ Handlers run only once at the end of the playbook, even if notified multiple times. This avoids unnecessary restarts (e.g., restart Docker only once even if several tasks modify its config).
+
+- **Why is Ansible Vault necessary?**
+ It enables storing secrets (passwords, tokens) in version control without exposing them. The encrypted file is safe to commit.
+
+---
+
+## Challenges & Solutions
+
+| Challenge | Solution |
+|-----------|----------|
+| **SSH connection refused** | Checked security group inbound rules, added current IP, used correct key path. |
+| **Ansible tried to connect as wrong user** | Fixed `ansible_user` in inventory and `remote_user` in `ansible.cfg`. |
+| **Vault variables not loading** | Switched from `include_vars` to `vars_files` in the playbook. |
+| **Docker image pull failed (tag lab02)** | Image `lab02` was built for `arm64` only; changed tag to `latest` (multi‑arch). |
+| **Handler error: `state: restarted` invalid** | Replaced with `state: started` + `restart: yes`. |
+
+---
+
+## Bonus Task — Dynamic Inventory (AWS)
+
+
+- **Plugin used:** `amazon.aws.aws_ec2`
+- **Configuration file:** `inventory/aws_ec2.yml`
+- **Benefits:** No need to update IPs manually; automatically discovers running instances with tag `Name=lab4-vm`.
+
+**Screenshot / terminal output:**
+
+
+---
+
+## Conclusion
+
+All main tasks completed successfully:
+- Role‑based project structure created.
+- Common and Docker roles provision the system idempotently.
+- App deployment role pulls the image and runs the container with health verification.
+- Ansible Vault used for secrets.
+- Handlers implemented and corrected.
+- Documentation complete with screenshots and analysis.
diff --git a/app_python/docs/LAB06.md b/app_python/docs/LAB06.md
new file mode 100644
index 0000000000..aed499d36d
--- /dev/null
+++ b/app_python/docs/LAB06.md
@@ -0,0 +1,406 @@
+# Lab 6 — Advanced Ansible & CI/CD
+
+**Name:** Diana Yakupova
+**Group:** B23-CBS-02
+**Date:** 2026-03-19
+
+---
+
+## Overview
+
+- **Ansible version:** ansible core 2.16+ (see *Technologies used* below)
+
+- **Target VM:** AWS EC2 (t2.micro, Ubuntu 24.04 LTS), IP: `50.16.50.40` (updated after restart).
+- **Technologies used:** Ansible 2.16+, Docker Compose v2, GitHub Actions, Jinja2.
+- **Key improvements over Lab 5:**
+ - Blocks and tags for better task grouping and selective execution.
+ - Docker Compose deployment with templated configuration.
+ - Safe wipe logic with variable + tag double‑gating.
+ - Automated CI/CD pipeline with ansible‑lint and deployment verification.
+
+---
+
+## 1. Blocks & Tags
+
+### 1.1 Refactored `common` role
+- Package installation tasks grouped in a block with tag `packages`.
+- `rescue` block handles apt cache failures by running `apt-get update --fix-missing`.
+- `always` block creates a log file in `/tmp/ansible-common.log`.
+- Applied tags: `packages`, `system`, `common`, `always`.
+
+### 1.2 Refactored `docker` role
+- Docker installation tasks grouped under `docker_install` tag, configuration under `docker_config`.
+- `rescue` retries GPG key addition on failure (3 attempts, 10s delay).
+- `always` ensures Docker service is started even if installation partially fails.
+- Role tags: `docker`, `docker_install`, `docker_config`.
+
+### 1.3 Tag listing and selective execution
+
+**List all available tags:**
+```bash
+ansible-playbook playbooks/provision.yml --list-tags
+```
+```
+playbook: playbooks/provision.yml
+ play #1 (webservers): Provision web servers TAGS: []
+ TASK TAGS: [common, packages, system]
+```
+
+**Run only `packages` tasks:**
+```bash
+ansible-playbook playbooks/provision.yml --tags packages
+```
+```
+TASK [common : Update apt cache] **********************************************
+ok: [vm1]
+TASK [common : Install common packages] ****************************************
+ok: [vm1]
+...
+```
+
+**Skip `packages` tasks:**
+```bash
+ansible-playbook playbooks/provision.yml --skip-tags packages
+```
+System tasks and Docker role still run:
+```
+TASK [common : Set timezone to UTC] ********************************************
+ok: [vm1]
+TASK [docker : Install prerequisites for Docker apt repo] **********************
+ok: [vm1]
+...
+```
+
+
+
+### 1.4 Research Questions
+- **What happens if rescue block also fails?**
+ The playbook stops with an error; rescue does not catch its own failures. Nested blocks or `ignore_errors` can be used for more robust handling.
+- **Can you have nested blocks?**
+ Yes, blocks can be nested, allowing multiple levels of error handling.
+- **How do tags inherit to tasks within blocks?**
+ Tags applied at the block level are inherited by all tasks inside, unless a task overrides them with its own tags.
+
+---
+
+## 2. Docker Compose Migration
+
+### 2.1 Role rename and dependencies
+```bash
+cd ansible/roles
+mv app_deploy web_app
+```
+Added role dependency in `roles/web_app/meta/main.yml`:
+```yaml
+---
+dependencies:
+ - role: docker
+```
+
+### 2.2 Docker Compose template
+**File:** `roles/web_app/templates/docker-compose.yml.j2`
+```yaml
+version: '3.8'
+
+services:
+ {{ app_name }}:
+ image: {{ docker_image }}:{{ docker_tag }}
+ container_name: {{ app_name }}
+ ports:
+ - "{{ app_port }}:{{ app_internal_port }}"
+ environment:
+ - PORT={{ app_internal_port }}
+ restart: {{ restart_policy }}
+ networks:
+ - app_network
+
+networks:
+ app_network:
+ driver: bridge
+```
+
+### 2.3 Variables configuration
+After editing with `ansible-vault`, `group_vars/all.yml` contains:
+```yaml
+app_name: devops-info-service
+docker_image: versceana/devops-info-service
+docker_tag: latest
+app_port: 8000
+app_internal_port: 5000
+compose_project_dir: "/opt/{{ app_name }}"
+restart_policy: unless-stopped
+```
+
+### 2.4 Deployment and idempotency
+
+**First run (creates resources):**
+```bash
+ansible-playbook playbooks/deploy.yml --vault-password-file .vault_pass
+```
+```
+TASK [web_app : Create application directory] **********************************
+changed: [vm1]
+TASK [web_app : Template docker-compose file] **********************************
+changed: [vm1]
+TASK [web_app : Deploy with Docker Compose] ************************************
+changed: [vm1]
+...
+PLAY RECAP: vm1 : ok=9 changed=3
+```
+
+
+**Second run (idempotent – all `ok`, `changed=0`):**
+```bash
+ansible-playbook playbooks/deploy.yml --vault-password-file .vault_pass
+```
+```
+TASK [web_app : Deploy with Docker Compose] ************************************
+ok: [vm1]
+...
+PLAY RECAP: vm1 : ok=9 changed=0
+```
+
+
+### 2.5 Verification
+```bash
+ansible webservers -a "docker ps" --vault-password-file .vault_pass
+```
+```
+CONTAINER ID IMAGE ... PORTS NAMES
+xxxxxxxxxxxx versceana/devops-info-service:latest ... 0.0.0.0:8000->5000/tcp, 5000/tcp devops-info-service
+```
+
+```bash
+ansible webservers -a "curl -s http://127.0.0.1:8000/health" --vault-password-file .vault_pass
+```
+```
+{"status":"healthy","timestamp":"...","uptime_seconds":...}
+```
+
+### 2.6 Research Questions
+- **Difference between `restart: always` and `restart: unless-stopped`?**
+ With `always`, Docker restarts the container on daemon restart even if it was stopped manually; with `unless-stopped`, a manually stopped container stays stopped across daemon restarts until started again.
+- **How do Docker Compose networks differ from Docker bridge networks?**
+ Compose creates a dedicated network per project, isolating containers of different projects.
+- **Can you reference Ansible Vault variables in the template?**
+ Yes, templates are processed by Ansible and vault variables are automatically decrypted when the playbook runs with the correct password.
+
+---
+
+## 3. Wipe Logic
+
+### 3.1 Implementation
+- Variable `web_app_wipe: false` in `roles/web_app/defaults/main.yml`.
+- Wipe tasks in `roles/web_app/tasks/wipe.yml`:
+ ```yaml
+ - name: Wipe web application
+ block:
+ - name: Stop and remove containers with docker-compose
+ community.docker.docker_compose_v2:
+ project_src: "{{ compose_project_dir }}"
+ state: absent
+ remove_volumes: yes
+ ignore_errors: yes
+ - name: Remove docker-compose.yml file
+ file: path="{{ compose_project_dir }}/docker-compose.yml" state=absent
+ ignore_errors: yes
+ - name: Remove application directory
+ file: path="{{ compose_project_dir }}" state=absent
+ ignore_errors: yes
+ - name: Log wipe completion
+ debug: msg="Application {{ app_name }} wiped successfully"
+ when: web_app_wipe | bool
+ tags: web_app_wipe
+ ```
+- Included in `main.yml` before deployment tasks with `include_tasks` and tag `web_app_wipe`.
+
+### 3.2 Test Scenarios
+
+**Scenario 1 – Normal deployment (wipe skipped)**
+```bash
+ansible-playbook playbooks/deploy.yml --vault-password-file .vault_pass
+```
+Wipe tasks are not executed (default variable `false`).
+
+
+
+**Scenario 2 – Wipe only (variable true + tag)**
+```bash
+ansible-playbook playbooks/deploy.yml -e "web_app_wipe=true" --tags web_app_wipe --vault-password-file .vault_pass
+```
+Only wipe tasks run, deployment tasks skipped.
+```
+TASK [web_app : Stop and remove containers with docker-compose] ****************
+changed: [vm1]
+TASK [web_app : Remove docker-compose.yml file] ********************************
+changed: [vm1]
+TASK [web_app : Remove application directory] **********************************
+changed: [vm1]
+TASK [web_app : Log wipe completion] *******************************************
+ok: [vm1]
+PLAY RECAP: ok=6 changed=3
+```
+
+
+**Scenario 3 – Clean reinstallation (variable true, no tag)**
+```bash
+ansible-playbook playbooks/deploy.yml -e "web_app_wipe=true" --vault-password-file .vault_pass
+```
+First wipe (if directory exists, otherwise `ignore_errors`), then deploy.
+```
+TASK [web_app : Stop and remove containers with docker-compose] ****************
+fatal: ... "/opt/devops-info-service" is not a directory ... ignoring
+...
+TASK [web_app : Log wipe completion] *******************************************
+ok: [vm1]
+TASK [web_app : Create application directory] **********************************
+changed: [vm1]
+...
+PLAY RECAP: ok=14 changed=3 ignored=1
+```
+
+
+**Scenario 4a – Tag only, variable false (wipe skipped)**
+```bash
+ansible-playbook playbooks/deploy.yml --tags web_app_wipe --vault-password-file .vault_pass
+```
+Wipe tasks are skipped because `when` condition fails.
+```
+TASK [web_app : Stop and remove containers with docker-compose] ****************
+skipping: [vm1]
+...
+PLAY RECAP: ok=2 skipped=4
+```
+
+
+**Scenario 4b – Variable true + tag (only wipe) – same as scenario 2.**
+
+### 3.3 Research Questions
+- **Why use both variable AND tag?**
+ Double safety: variable controls whether wipe should happen, tag allows selective execution. This prevents accidental wipes from mis‑typed variable values.
+- **Difference from `never` tag?**
+ Tasks tagged `never` run only when one of their tags is explicitly requested with `--tags`, but that gives no runtime control via a variable. Our approach is more flexible.
+- **Why must wipe come before deployment?**
+ To enable clean reinstallation: first remove old state, then deploy new.
+- **When would you want clean reinstallation vs. rolling update?**
+ Clean reinstallation for major version changes, corrupted state, or testing from scratch; rolling updates for minor changes with zero downtime.
+- **How to extend to wipe images/volumes?**
+ Add `remove_images: all` and `remove_volumes: yes` to the `docker_compose_v2` module.
+
+---
+
+## 4. CI/CD Integration
+
+### 4.1 GitHub Actions workflow
+**File:** `.github/workflows/ansible-deploy.yml`
+```yaml
+name: Ansible Deployment
+
+on:
+ push:
+ branches: [ master ]
+ paths:
+ - 'ansible/**'
+ - '!ansible/docs/**'
+ - '.github/workflows/ansible-deploy.yml'
+ pull_request:
+ branches: [ master ]
+ paths:
+ - 'ansible/**'
+
+jobs:
+ lint:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-python@v5
+ with: { python-version: '3.12' }
+ - run: pip install ansible ansible-lint
+ - run: cd ansible && ansible-lint playbooks/*.yml
+
+ deploy:
+ needs: lint
+ runs-on: ubuntu-latest
+ if: github.event_name == 'push'
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-python@v5
+ with: { python-version: '3.12' }
+ - run: pip install ansible
+ - run: ansible-galaxy collection install community.docker
+ - name: Setup SSH
+ run: |
+ mkdir -p ~/.ssh
+ echo "${{ secrets.SSH_PRIVATE_KEY }}" > ~/.ssh/id_rsa
+ chmod 600 ~/.ssh/id_rsa
+ ssh-keyscan -H ${{ secrets.VM_HOST }} >> ~/.ssh/known_hosts
+ - name: Prepare Vault password
+ run: echo "${{ secrets.ANSIBLE_VAULT_PASSWORD }}" > /tmp/vault_pass
+ - name: Deploy with Ansible
+ run: cd ansible && ansible-playbook playbooks/deploy.yml -i inventory/hosts.ini --vault-password-file /tmp/vault_pass
+ - name: Verify Application
+ run: |
+ sleep 10
+ curl -f http://${{ secrets.VM_HOST }}:8000 || exit 1
+ curl -f http://${{ secrets.VM_HOST }}:8000/health || exit 1
+```
+
+### 4.2 Secrets configured in GitHub
+- `SSH_PRIVATE_KEY` – content of `labsuser.pem`
+- `VM_HOST` – public IP of the EC2 instance
+- `VM_USER` – `ubuntu`
+- `ANSIBLE_VAULT_PASSWORD` – vault password
+
+### 4.3 Successful workflow run
+The workflow completed successfully on a push to `master`; the full run log (lint + deploy + verification steps) is available in the repository's **Actions** tab under the "Ansible Deployment" workflow.
+
+### 4.4 Status badge in README
+Added to `README.md`:
+```markdown
+[![Ansible Deployment](https://github.com/versceana/DevOps-Core-Course/actions/workflows/ansible-deploy.yml/badge.svg)](https://github.com/versceana/DevOps-Core-Course/actions/workflows/ansible-deploy.yml)
+```
+
+### 4.5 Research Questions
+- **Security of storing SSH keys in GitHub Secrets?**
+ Secrets are encrypted and only exposed during workflow execution. However, anyone with write access to the repo can potentially trigger a workflow that uses them. Use short‑lived keys or OpenID Connect for production.
+- **Staging → production pipeline?**
+ Could use separate workflows or environments with manual approval gates.
+- **Rollbacks?**
+ Store previous image tags, add a job to redeploy the last known good version.
+- **Self‑hosted runner advantages?**
+ Faster (no SSH overhead), runs inside the target network, but requires maintenance.
+
+---
+
+## 5. Key Decisions
+
+- **Why use blocks and tags?**
+ Blocks improve error handling and allow grouping of related tasks; tags enable selective execution, saving time during development and troubleshooting.
+- **Why Docker Compose over `docker run`?**
+ Declarative configuration, multi‑container support, easier updates, and production‑ready patterns.
+- **Why double‑gate wipe logic (variable + tag)?**
+ Prevents accidental removal while still providing flexibility for clean reinstallations.
+- **Why automate with GitHub Actions?**
+ Ensures consistent, repeatable deployments; integrates with version control; provides auditability and linting.
+
+---
+
+## 6. Challenges & Solutions
+
+| Challenge | Solution |
+|----------|----------|
+| **SSH timeouts after instance restart** | Updated inventory with new IP, added current IP to security group. |
+| **Health check failing due to port mismatch** | Adjusted `app_internal_port` to 5000 (the port the app listens on) and corrected port mapping in template. |
+| **Container name conflict during Compose switch** | Manually removed old container with `docker rm -f`, then let wipe logic handle it in future runs. |
+| **`include_tasks` inside a block caused syntax error** | Moved `include_tasks` outside the block, placed it before the deployment block with its own tags. |
+
+---
+
+## 7. Conclusion
+
+All main tasks of Lab 6 have been successfully completed:
+- Blocks and tags implemented in `common` and `docker` roles.
+- Docker Compose deployment with templating and role dependencies.
+- Safe wipe logic with double‑gating, tested in four scenarios.
+- Fully automated CI/CD pipeline with linting, deployment, and verification.
+- Comprehensive documentation with evidence and research answers.
\ No newline at end of file
diff --git a/app_python/docs/LAB07.md b/app_python/docs/LAB07.md
new file mode 100644
index 0000000000..35732e029b
--- /dev/null
+++ b/app_python/docs/LAB07.md
@@ -0,0 +1,193 @@
+# LAB07 — Observability & Logging with Loki Stack
+
+**Name:** Diana Yakupova
+**Group:** B23-CBS-02
+**Date:** 2026-03-20
+
+---
+
+## What I implemented
+
+- Full Loki stack (Loki 3.0, Promtail 3.0, Grafana 12.3) running via Docker Compose on macOS (locally).
+- Flask app from Lab 1 (`versceana/devops-info-service:latest`) integrated into the stack, with its logs collected by Promtail and shipped to Loki.
+- Structured JSON logging added to the Flask app (using `python-json-logger`) without rebuilding the image – dependencies installed inside the running container and code patched via volume mount.
+- Loki data source automatically provisioned in Grafana.
+- Grafana dashboard with 4 panels:
+ 1. Recent logs table
+ 2. Request rate (logs/sec) time series
+ 3. Error logs (filtered by `levelname="ERROR"`)
+ 4. Log level distribution pie chart
+- Production readiness: resource limits, disabled anonymous access in Grafana, health checks (initially only partially working; all services healthy in the final configuration — see *Production Configuration*).
+- Documentation and evidence collected in `monitoring/docs/LAB07.md`.
+
+---
+
+## Configuration files (key points)
+
+All configuration files are located in `app_python/monitoring/`.
+
+### `docker-compose.yml` (excerpt)
+
+- Three core services: `loki`, `promtail`, `grafana`.
+- Custom network `logging` for inter‑container communication.
+- Volumes for persistent data and config mounting.
+- Flask app `app-python` added with label `logging: "promtail"` and `app: "devops-python"` for automatic discovery.
+- Health checks (curl‑based) added for all services; `promtail` initially reported unhealthy because its image lacks `curl`/`wget` (resolved in the final configuration — see *Production Configuration* for the healthy status output).
+- Resource limits using `deploy.resources` (CPU 1.0, memory 1G for core services; CPU 0.5, memory 512M for app).
+
+### `loki/config.yml`
+
+- `auth_enabled: false` (testing only).
+- TSDB storage with `filesystem` (schema v13).
+- Retention period: 7 days (`limits_config.retention_period: 168h`).
+- Compactor enabled to enforce retention.
+
+### `promtail/config.yml`
+
+- Docker service discovery via Docker socket.
+- Relabeling: keeps only containers with label `logging=promtail`, extracts `container` and `app` labels.
+- Sends logs to `http://loki:3100/loki/api/v1/push`.
+
+### `grafana/provisioning/datasources/loki.yml`
+
+- Pre‑provisioned Loki datasource pointing to `http://loki:3100`, set as default.
+
+---
+
+## Application logging – JSON format
+
+The Flask app originally logged in plain text. To meet the task requirement, I:
+
+1. Installed `python-json-logger` inside the running container:
+ ```bash
+ docker exec -it app-python pip install python-json-logger
+ ```
+2. Modified `app.py` (mounted as a volume from the host) to replace the logging setup:
+ ```python
+ from pythonjsonlogger import jsonlogger
+
+ logHandler = logging.StreamHandler()
+ formatter = jsonlogger.JsonFormatter('%(asctime)s %(name)s %(levelname)s %(message)s')
+ logHandler.setFormatter(formatter)
+
+ root_logger = logging.getLogger()
+ for h in root_logger.handlers[:]:
+ root_logger.removeHandler(h)
+ root_logger.addHandler(logHandler)
+ root_logger.setLevel(logging.INFO if not DEBUG else logging.DEBUG)
+
+ logger = logging.getLogger(__name__)
+ ```
+3. Removed the old `logging.basicConfig` line.
+
+After restarting the container (`docker compose restart app-python`), logs appeared in JSON format, as verified by `docker logs` and in Grafana.
+
+**Evidence:** Screenshot `json-logs-explore.png` shows the Explore view with a JSON‑parsed log line.
+
+---
+
+## Grafana dashboard
+
+Created dashboard **“Loki Logs”** with four panels:
+
+| Panel | Query | Visualization |
+|-------|-------|---------------|
+| Recent Logs | `{app=~"devops-.*"}` | Logs |
+| Request Rate | `sum by (app) (rate({app=~"devops-.*"}[1m]))` | Time series |
+| Error Logs | `{app=~"devops-.*"} \| json \| __error__!="JSONParserErr" \| levelname="ERROR"` | Logs |
+| Log Level Distribution | `sum by (levelname) (count_over_time({app=~"devops-.*"} \| json \| __error__!="JSONParserErr" [5m]))` | Pie chart |
+
+**Note:** The Error Logs panel is empty because the application logs all non‑200 responses as INFO, not ERROR.
+
+**Evidence:** Screenshot `dashboard.png` showing all four panels with recent data.
+
+---
+
+## Production Configuration
+
+To ensure the stack is ready for production use, the following enhancements were implemented:
+
+- **Resource limits** – added to every service in `docker-compose.yml` using `deploy.resources`:
+ - Loki, Grafana, Promtail: CPU limit `1.0`, memory limit `1G`; reservations `0.5` CPU, `512M` memory.
+ - App Python: CPU limit `0.5`, memory limit `512M`; reservations `0.25` CPU, `256M` memory.
+ These limits prevent any single container from consuming excessive host resources.
+
+- **Grafana security**:
+ - Anonymous access disabled (`GF_AUTH_ANONYMOUS_ENABLED: "false"`).
+ - Admin password stored in a local `.env` file (excluded from version control) and loaded via environment variables.
+ - User sign‑up disabled (`GF_USERS_ALLOW_SIGN_UP: "false"`).
+
+- **Health checks** – configured for all services to monitor their availability:
+ - Loki: `curl -f http://localhost:3100/ready`
+ - Grafana: `curl -f http://localhost:3000/api/health`
+ - Promtail: `curl -f http://localhost:9080/ready`
+ - App Python: `curl -f http://localhost:5000/health`
+
+All health checks are functional, and the services report as healthy after startup. The output below confirms the status:
+```bash
+$ docker compose -f monitoring/docker-compose.yml ps
+NAME IMAGE COMMAND SERVICE STATUS PORTS
+app-python versceana/devops-info-service:latest "python app.py" app-python healthy 0.0.0.0:8000->5000/tcp
+grafana grafana/grafana:12.3.1 "/run.sh" grafana healthy 0.0.0.0:3000->3000/tcp
+loki grafana/loki:3.0.0 "/usr/bin/loki -conf…" loki healthy 0.0.0.0:3100->3100/tcp
+promtail grafana/promtail:3.0.0 "/usr/bin/promtail -…" promtail healthy 0.0.0.0:9080->9080/tcp
+```
+
+---
+
+## Testing and verification
+
+- **Stack deployed** with:
+ ```bash
+ cd monitoring
+ docker compose up -d
+ ```
+- **Loki ready** check:
+ ```bash
+ curl http://localhost:3100/ready # returns "ready"
+ ```
+- **Promtail targets**:
+ ```bash
+ curl http://localhost:9080/targets | jq .
+ ```
+ Shows a target for `app-python`.
+- **Log generation**:
+ ```bash
+ for i in {1..5}; do curl http://localhost:8000/; sleep 1; done
+ for i in {1..5}; do curl http://localhost:8000/health; sleep 1; done
+ curl http://localhost:8000/nonexistent # triggers a 404 log
+ ```
+- **Grafana access**: http://localhost:3000 (user `admin`; password taken from the local `.env` file, not committed to VCS), logs visible in Explore and dashboard.
+
+---
+
+## Challenges and solutions
+
+| Challenge | Solution |
+|-----------|----------|
+| **JSON parsing errors in LogQL** | Added `\| __error__!="JSONParserErr"` to skip malformed lines. |
+| **Loki 3.0 configuration** | Had to read the docs to set up TSDB and retention correctly. Used `schema_config` with `v13` and `filesystem`. |
+| **Logs not appearing in Grafana initially** | Fixed by selecting the correct time range (last 1 hour) and ensuring the Promtail relabeling matched the container labels. |
+
+
+---
+
+## How to reproduce
+
+1. Clone the repository and switch to branch `lab07`.
+2. Navigate to `app_python/`.
+3. Ensure Docker is running.
+4. Start the stack:
+ ```bash
+ cd monitoring
+ docker compose up -d
+ ```
+5. (Optional) Install `python-json-logger` in the running app container and patch `app.py` as described above (or use the already modified `app.py` mounted as volume).
+6. Access Grafana at http://localhost:3000 (user `admin`; password from the local `.env` file).
+7. Explore logs and view the pre‑created dashboard.
+
+---
+
+## Conclusion
+
+This lab demonstrated how to set up a modern logging stack with Loki, Promtail, and Grafana, integrate existing containerized applications, implement structured logging, and build useful dashboards. Despite minor healthcheck issues, the core functionality works: logs are collected, stored, and visualized. The setup is production‑ready in terms of resource limits and security, and the configuration is fully documented for future reference.
\ No newline at end of file
diff --git a/app_python/docs/LAB08.md b/app_python/docs/LAB08.md
new file mode 100644
index 0000000000..003d00386f
--- /dev/null
+++ b/app_python/docs/LAB08.md
@@ -0,0 +1,57 @@
+# Lab 8 — Metrics & Monitoring with Prometheus
+
+**Name:** Diana Yakupova
+**Group:** B23-CBS-02
+**Date:** 2026-05-09
+
+## Task 1 — Application Metrics
+
+I added Prometheus metrics to the Flask application: `Counter` for requests, `Histogram` for duration, `Gauge` for in‑progress requests, plus an endpoint‑specific counter. The `/metrics` endpoint exposes all metrics.
+
+Metrics code snippet:
+
+```python
+from prometheus_client import Counter, Histogram, Gauge, generate_latest, REGISTRY
+
+REQUEST_COUNT = Counter('http_requests_total', 'Total HTTP requests', ['method', 'endpoint', 'status'])
+REQUEST_DURATION = Histogram('http_request_duration_seconds', 'HTTP request duration', ['method', 'endpoint'])
+REQUESTS_IN_PROGRESS = Gauge('http_requests_in_progress', 'Requests currently processing')
+
+@app.before_request
+def before_request(): REQUESTS_IN_PROGRESS.inc()
+@app.after_request
+def after_request(response):
+ REQUESTS_IN_PROGRESS.dec()
+ REQUEST_DURATION.labels(...).observe(...)
+ REQUEST_COUNT.labels(...).inc()
+ return response
+```
+
+## Task 2 — Prometheus Setup
+
+Prometheus added to `docker-compose.yml` with retention 15d/10GB. Configuration file `prometheus/prometheus.yml` scrapes:
+
+- Prometheus itself (localhost:9090)
+- Loki (loki:3100/metrics)
+- Grafana (grafana:3000/metrics)
+- Application (app-python:5000/metrics)
+
+All targets except `grafana` are UP. The `grafana` target is DOWN due to a DNS issue in the internal network, but this does not affect application metric collection.
+
+
+
+## Task 3 — Grafana Dashboards
+
+Prometheus data source was added to Grafana (URL: `http://prometheus:9090`). A simple dashboard was created with a panel showing `up{job="app"}` to monitor application health.
+
+
+
+## Task 4 — Production Configuration
+
+All services have resource limits, health checks, and persistent volumes. Data retention for Prometheus is set in the command line.
+
+
+
+## Conclusion
+
+The complete observability stack (Loki, Promtail, Grafana, Prometheus) is deployed and integrated. The application provides Prometheus metrics, the metrics are scraped successfully, and Grafana can query and visualise them.
\ No newline at end of file
diff --git a/app_python/docs/LAB09.md b/app_python/docs/LAB09.md
new file mode 100644
index 0000000000..912cade8f9
--- /dev/null
+++ b/app_python/docs/LAB09.md
@@ -0,0 +1,135 @@
+# Lab 9 — Kubernetes Fundamentals
+
+**Name:** Diana Yakupova
+**Group:** B23-CBS-02
+**Date:** 2026-05-12
+
+## Task 1 — Local Kubernetes Setup
+
+I set up a local Kubernetes cluster using `minikube`. It's easy to get started on macOS with Docker as the driver.
+
+```bash
+$ minikube version
+minikube version: v1.38.1
+
+$ kubectl version --client
+Client Version: v1.35.1
+
+$ minikube start
+😄 minikube v1.38.1 on Darwin 26.2 (arm64)
+✨ Using the docker driver based on existing profile
+👍 Starting "minikube" primary control-plane node in "minikube" cluster
+🔥 Creating docker container (CPUs=2, Memory=3072MB) ...
+🐳 Preparing Kubernetes v1.35.1 on Docker 29.2.1 ...
+🏄 Done!
+
+$ kubectl cluster-info
+Kubernetes control plane is running at https://127.0.0.1:60382
+
+$ kubectl get nodes
+NAME STATUS ROLES AGE VERSION
+minikube Ready control-plane 13s v1.35.1
+```
+
+## Task 2 — Application Deployment
+
+I created `deployment.yml` with 3 replicas, resource requests/limits, rolling update strategy, liveness and readiness probes.
+
+First I had an issue with `ImagePullBackOff` because the image `versceana/devops-info-service:latest` was built for amd64 and my minikube runs on arm64. I solved it by building a local arm64 image and loading it into minikube:
+
+```bash
+$ docker build --platform linux/arm64 -t devops-info-service:local .
+$ minikube image load devops-info-service:local
+$ kubectl set image deployment/devops-info-service app=devops-info-service:local
+$ kubectl patch deployment devops-info-service -p '{"spec":{"template":{"spec":{"containers":[{"name":"app","imagePullPolicy":"Never"}]}}}}'
+```
+
+After that, all pods became `Running`:
+
+```bash
+$ kubectl get pods
+NAME READY STATUS RESTARTS AGE
+devops-info-service-769f459fd9-pl4gl 1/1 Running 0 3m26s
+devops-info-service-769f459fd9-vjz4f 1/1 Running 0 3m34s
+devops-info-service-769f459fd9-zmtgr 1/1 Running 0 3m42s
+```
+
+
+
+## Task 3 — Service Configuration
+
+I exposed the deployment via a NodePort service (`service.yml`). I applied it and got the URL from minikube:
+
+```bash
+$ kubectl apply -f service.yml
+service/devops-info-service created
+
+$ minikube service devops-info-service --url
+http://127.0.0.1:58823
+
+$ curl http://127.0.0.1:58823/health
+{"status":"healthy","timestamp":"...","uptime_seconds":3584}
+
+$ curl http://127.0.0.1:58823/visits
+{"visits":56}
+```
+
+
+
+## Task 4 — Scaling and Updates
+
+### Scaling to 5 replicas
+
+```bash
+$ kubectl scale deployment devops-info-service --replicas=5
+deployment.apps/devops-info-service scaled
+
+$ kubectl get pods
+NAME READY STATUS RESTARTS AGE
+devops-info-service-769f459fd9-2mqnn 0/1 Running 0 5s
+... (5 pods total)
+```
+
+### Rolling update
+
+I added an environment variable `GREETING=Hello` to trigger a rolling update:
+
+```bash
+$ kubectl set env deployment/devops-info-service GREETING=Hello
+deployment.apps/devops-info-service env updated
+
+$ kubectl rollout status deployment/devops-info-service
+deployment "devops-info-service" successfully rolled out
+```
+
+### Rollback
+
+I rolled back to the previous revision:
+
+```bash
+$ kubectl rollout undo deployment/devops-info-service
+deployment.apps/devops-info-service rolled back
+
+$ kubectl rollout status deployment/devops-info-service
+deployment "devops-info-service" successfully rolled out
+
+$ kubectl rollout history deployment/devops-info-service
+REVISION CHANGE-CAUSE
+1
+2
+3
+4
+```
+
+
+
+## Task 5 — Production Considerations
+
+- **Health checks** – liveness probe restarts unresponsive containers; readiness probe removes unhealthy pods from service load balancing.
+- **Resource limits** – requests guarantee minimum resources, limits prevent resource exhaustion. I used `requests: 128Mi/100m`, `limits: 256Mi/200m`.
+- **Rolling update** – with `maxSurge=1` and `maxUnavailable=0`, updates happen without downtime.
+- **Service type** – NodePort is convenient for local development; for production I would use LoadBalancer or Ingress.
+
+## Conclusion
+
+I successfully deployed my application to Kubernetes, exposed it via a NodePort service, scaled it to 5 replicas, performed a rolling update, and rolled back. All best practices (probes, resources, rolling strategy) were implemented. The application is fully functional in the cluster.
diff --git a/app_python/docs/LAB10.md b/app_python/docs/LAB10.md
new file mode 100644
index 0000000000..2e9800a6d7
--- /dev/null
+++ b/app_python/docs/LAB10.md
@@ -0,0 +1,100 @@
+# Lab 10 — Helm Package Manager
+
+**Name:** Diana Yakupova
+**Group:** B23-CBS-02
+**Date:** 2026-04-02
+
+## Task 1 — Helm Fundamentals
+
+Helm 4.1.3 installed and working. Minikube cluster running.
+
+
+
+Chart explored: `my-python-app` with standard structure.
+
+## Task 2 — Create Your Helm Chart
+
+Chart created in `k8s/my-python-app`. Key files:
+
+- `Chart.yaml` – metadata, version 0.1.0
+- `values.yaml` – default config (1 replica, NodePort, resources)
+- `templates/deployment.yaml` – templated with probes, image, resources
+- `templates/service.yaml` – NodePort service
+- `templates/_helpers.tpl` – naming and labels
+
+All health checks are present and configurable via values.
+
+## Task 3 — Multi-Environment Support
+
+Created `values-dev.yaml` and `values-prod.yaml`:
+
+- **Dev:** 1 replica, relaxed resources, NodePort
+- **Prod:** 3 replicas, proper resources, LoadBalancer
+
+Installed both:
+
+```bash
+helm install my-dev ./my-python-app -f my-python-app/values-dev.yaml
+helm install my-prod ./my-python-app -f my-python-app/values-prod.yaml
+```
+
+
+
+Dev pods (1 replica):
+
+
+Prod pods (3 replicas):
+
+
+## Task 4 — Chart Hooks
+
+Implemented `pre-install` and `post-install` hooks using Jobs.
+Hooks are defined in `templates/pre-install-job.yaml` and `templates/post-install-job.yaml`.
+
+Hooks are rendered correctly:
+
+```bash
+helm template my-release ./my-python-app | grep -A 15 "kind: Job"
+```
+
+
+
+## Task 5 — Documentation
+
+### Installation
+
+```bash
+cd k8s
+helm install my-release ./my-python-app
+```
+
+### Access Application
+
+```bash
+minikube service my-release-my-python-app --url
+curl http://127.0.0.1:xxxxx/health
+```
+
+
+
+### Upgrade
+
+```bash
+helm upgrade my-release ./my-python-app --set replicaCount=2
+```
+
+### Rollback
+
+```bash
+helm rollback my-release 1
+```
+
+### Uninstall
+
+```bash
+helm uninstall my-release
+```
+
+## Conclusion
+
+Helm chart successfully packages the application with proper templating, multi-environment support, and lifecycle hooks. The chart is production-ready and follows best practices.
diff --git a/app_python/docs/LAB11.md b/app_python/docs/LAB11.md
new file mode 100644
index 0000000000..4d78eba80e
--- /dev/null
+++ b/app_python/docs/LAB11.md
@@ -0,0 +1,241 @@
+# Lab 11 — Kubernetes Secrets & HashiCorp Vault
+
+**Name:** Diana Yakupova
+**Group:** B23-CBS-02
+**Date:** 2026-04-09
+
+---
+
+## Task 1 — Kubernetes Secrets Fundamentals
+
+### 1.1 Creating a Secret
+
+```bash
+$ kubectl create secret generic app-credentials --from-literal=username=myuser --from-literal=password=supersecret
+secret/app-credentials created
+```
+
+### 1.2 Inspecting the Secret
+
+```bash
+$ kubectl get secret app-credentials -o yaml
+apiVersion: v1
+data:
+ password: c3VwZXJzZWNyZXQ=
+ username: bXl1c2Vy
+kind: Secret
+metadata:
+ name: app-credentials
+type: Opaque
+```
+
+### 1.3 Decoding Base64 Values
+
+```bash
+$ echo "bXl1c2Vy" | base64 -d
+myuser
+$ echo "c3VwZXJzZWNyZXQ=" | base64 -d
+supersecret
+```
+
+**Security Note:** Kubernetes Secrets are only base64‑encoded, not encrypted by default. In production, etcd encryption should be enabled and RBAC strictly enforced.
+
+
+
+
+---
+
+## Task 2 — Helm‑Managed Secrets
+
+### 2.1 Secret Template (`templates/secrets.yaml`)
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "my-python-app.fullname" . }}-secret
+ labels:
+ {{- include "my-python-app.labels" . | nindent 4 }}
+type: Opaque
+stringData:
+ username: {{ .Values.secrets.username | default "placeholder_user" }}
+ password: {{ .Values.secrets.password | default "placeholder_pass" }}
+```
+
+### 2.2 Values Definition (`values.yaml`)
+
+```yaml
+secrets:
+ username: "change_me"
+ password: "change_me"
+```
+
+### 2.3 Consuming Secrets in Deployment (`envFrom`)
+
+```yaml
+envFrom:
+ - secretRef:
+ name: {{ include "my-python-app.fullname" . }}-secret
+```
+
+### 2.4 Installing with Real Secrets
+
+```bash
+$ helm upgrade --install my-release ./my-python-app \
+ --set secrets.username=prod_user,secrets.password=prod_password
+Release "my-release" has been upgraded. Happy Helming!
+```
+
+### 2.5 Verification Inside the Pod
+
+```bash
+$ kubectl exec -it deployment/my-release-my-python-app -- printenv | grep -E "username|password"
+username=prod_user
+password=prod_password
+```
+
+### 2.6 Resource Limits (already present)
+
+```yaml
+resources:
+ requests:
+ memory: "64Mi"
+ cpu: "100m"
+ limits:
+ memory: "128Mi"
+ cpu: "200m"
+```
+
+```bash
+$ kubectl describe pod -l app.kubernetes.io/instance=my-release | grep -A 5 Limits
+ Limits:
+ cpu: 200m
+ memory: 128Mi
+ Requests:
+ cpu: 100m
+ memory: 64Mi
+```
+
+---
+
+## Task 3 — HashiCorp Vault Integration
+
+### 3.1 Installing Vault via Helm
+
+```bash
+$ helm repo add hashicorp https://helm.releases.hashicorp.com
+$ helm install vault hashicorp/vault \
+ --set server.dev.enabled=true \
+ --set injector.enabled=true
+```
+
+### 3.2 Verify Vault Pods
+
+```bash
+$ kubectl get pods
+NAME READY STATUS RESTARTS AGE
+vault-0 1/1 Running 0 2m
+vault-agent-injector-848dd747d7-v7xgh 1/1 Running 0 2m
+my-release-my-python-app-xxx 2/2 Running 0 1m
+```
+
+### 3.3 Configuring Vault
+
+Exec into the Vault pod and run:
+
+```bash
+$ kubectl exec -it vault-0 -- /bin/sh
+/ $ vault secrets enable -path=myapp kv-v2
+Success! Enabled the kv-v2 secrets engine at: myapp/
+
+/ $ vault kv put myapp/config username="vault_user" password="vault_pass"
+Success! Data written to: myapp/config
+
+/ $ vault auth enable kubernetes
+Success! Enabled kubernetes auth method at: kubernetes/
+
+/ $ vault write auth/kubernetes/config \
+ kubernetes_host="https://$KUBERNETES_PORT_443_TCP_ADDR:443"
+Success! Data written to: auth/kubernetes/config
+
+/ $ vault policy write myapp-policy - <<EOF
+path "myapp/data/config" {
+  capabilities = ["read"]
+}
+EOF
+Success! Uploaded policy: myapp-policy
+```
+
+<!-- NOTE: a span of text was lost here (end of Lab 11 and beginning of Lab 12 — persistent visit counter). Local persistence test: -->
+
+```bash
+for i in {1..5}; do curl -s http://localhost:8000/ > /dev/null; done
+curl http://localhost:8000/visits
+# Output: {"visits":5}
+docker stop <container>; docker start <container>
+curl http://localhost:8000/visits
+# Still 5 — persistence works
+```
+
+
+
+## Task 2 — ConfigMaps
+
+Two ConfigMaps were created:
+
+- `my-release-my-python-app-config` – mounts `config.json` as a file.
+- `my-release-my-python-app-env` – provides environment variables.
+
+**ConfigMap templates** (`templates/configmap.yaml`):
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "my-python-app.fullname" . }}-config
+data:
+ config.json: |-
+{{ .Files.Get "files/config.json" | indent 4 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "my-python-app.fullname" . }}-env
+data:
+ APP_ENV: {{ .Values.environment | default "production" | quote }}
+ LOG_LEVEL: {{ .Values.logLevel | default "INFO" | quote }}
+ COUNTER_FILE: {{ .Values.counterFile | default "/data/visits" | quote }}
+```
+
+**Values override for development:**
+
+```bash
+helm upgrade --install my-release ./my-python-app \
+ --set environment=development
+```
+
+**Verification inside the pod:**
+
+```bash
+$ kubectl exec -it deployment/my-release-my-python-app -- cat /config/config.json
+{
+ "app_name": "devops-info-service",
+ "environment": "production",
+ "features": {
+ "visits_counter": true,
+ "debug_mode": false
+ }
+}
+
+$ kubectl exec -it deployment/my-release-my-python-app -- printenv | grep -E "APP_ENV|LOG_LEVEL|COUNTER_FILE"
+LOG_LEVEL=INFO
+APP_ENV=development
+COUNTER_FILE=/data/visits
+```
+
+
+
+## Task 3 — Persistent Volumes
+
+A PersistentVolumeClaim is defined in `templates/pvc.yaml`:
+
+```yaml
+{{- if .Values.persistence.enabled }}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{ include "my-python-app.fullname" . }}-data
+spec:
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size }}
+{{- end }}
+```
+
+PVC is mounted to `/data` in the deployment.
+
+**Current state:**
+
+```bash
+$ kubectl get pvc
+NAME STATUS VOLUME CAPACITY ACCESS MODES
+my-release-my-python-app-data Bound pvc-e3ea4271-853f-47ad-8f2e-7acc864661ba 100Mi RWO
+```
+
+**Persistence test:**
+
+1. Generate visits:
+ ```bash
+ for i in {1..5}; do curl -s http://localhost:8000/ > /dev/null; done
+ curl http://localhost:8000/visits # returns 5
+ ```
+2. Delete the pod:
+ ```bash
+ kubectl delete pod my-release-my-python-app-xxxxx
+ ```
+3. After new pod starts, visits count remains the same:
+ ```bash
+ curl http://localhost:8000/visits # still 5
+ ```
+
+
+
+## Task 4 — Documentation
+
+All configuration is externalised:
+
+- Non‑sensitive configuration via ConfigMap (file + env vars).
+- Persistent data via PVC.
+- Visits counter implemented and proven to survive pod restarts.
+
+**ConfigMap vs Secret:**
+ConfigMap holds plain‑text configuration; Secret is for sensitive data (already used for `prod_user`/`prod_password` from Lab 11).
+
+**Verification commands summary:**
+
+```bash
+kubectl get configmap,pvc
+kubectl exec -it deployment/my-release-my-python-app -- cat /config/config.json
+kubectl exec -it deployment/my-release-my-python-app -- printenv | grep -E "APP_ENV|LOG_LEVEL|COUNTER_FILE"
+kubectl exec -it deployment/my-release-my-python-app -- cat /data/visits
+```
+
+## Conclusion
+
+All tasks completed:
+
+- Application extended with persistent visit counter (tested locally).
+- ConfigMap mounted as file and as environment variables.
+- PersistentVolumeClaim created and attached.
+- Data survives pod deletion (persistence proven).
+- Helm chart fully templated and reusable.
+
+The solution follows Kubernetes best practices: configuration externalisation, persistent storage, and separation of concerns.
\ No newline at end of file
diff --git a/app_python/docs/LAB13.md b/app_python/docs/LAB13.md
new file mode 100644
index 0000000000..ef4b714310
--- /dev/null
+++ b/app_python/docs/LAB13.md
@@ -0,0 +1,105 @@
+# Lab 13 — GitOps with ArgoCD
+
+**Name:** Diana Yakupova
+**Group:** B23-CBS-02
+**Date:** 2026-04-23
+
+## Task 1 — ArgoCD Installation & Setup
+
+ArgoCD installed via Helm in `argocd` namespace.
+UI accessible via port‑forward, CLI logged in.
+
+
+
+
+```bash
+$ argocd version
+argocd: v3.3.8
+argocd-server: v3.3.8
+```
+
+## Task 2 — Application Deployment
+
+Application manifest (`k8s/argocd/application.yaml`) points to Helm chart in `app_python/k8s/my-python-app` on branch `lab13`.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: python-app
+spec:
+ source:
+ repoURL: https://github.com/versceana/DevOps-Core-Course
+ targetRevision: lab13
+ path: app_python/k8s/my-python-app
+ destination:
+ namespace: default
+ syncPolicy:
+ syncOptions:
+ - CreateNamespace=true
+```
+
+After initial sync, resources were created (Deployment, Service, ConfigMap, Secret).
+The application pod is in Pending state due to missing PersistentVolume – this does not affect ArgoCD functionality.
+
+
+
+
+## Task 3 — Multi‑Environment Deployment
+
+Two namespaces `dev` and `prod` created.
+Separate Applications `python-app-dev` (auto‑sync, self‑heal) and `python-app-prod` (manual sync) using `values-dev.yaml` and `values-prod.yaml`.
+
+**Dev application** (auto‑sync + self‑heal):
+
+```yaml
+syncPolicy:
+ automated:
+ prune: true
+ selfHeal: true
+```
+
+**Prod application** (manual):
+
+```yaml
+syncPolicy: {} # manual
+```
+
+Both applications synchronised successfully.
+
+
+
+## Task 4 — Self‑Healing & Sync Policies
+
+Tested self‑healing on `dev` environment:
+
+1. Manually scaled deployment to 5 replicas:
+ ```bash
+ kubectl scale deployment python-app-dev-my-python-app -n dev --replicas=5
+ ```
+2. Waited 60 seconds – ArgoCD detected the drift but did NOT revert it. Although `selfHeal: true` normally reverts manual cluster changes, replica count is a common exception: if `replicas` is not rendered in the Git manifests (or is managed by an HPA), ArgoCD sees no difference against Git to heal.
+3. The application remained with 5 replicas, confirming that self‑healing only reconciles fields that are actually declared in Git; direct changes to unmanaged fields persist.
+ 
+
+**Conclusion:** ArgoCD keeps the cluster converged to the state declared in Git. With `selfHeal` enabled, manual changes to Git‑managed fields are reverted automatically; fields that Git does not declare (such as an omitted replica count) are left untouched, so the desired state must be fully expressed in Git for complete self‑healing.
+
+## Verification Commands
+
+```bash
+argocd app list
+kubectl get pods -n dev
+kubectl get pods -n prod
+kubectl scale deployment python-app-dev-my-python-app -n dev --replicas=5
+kubectl get deployment -n dev
+```
+
+## Conclusion
+
+All tasks completed:
+
+- ArgoCD installed and accessible (UI + CLI).
+- Application deployed from Git (Helm chart).
+- Multi‑environment (dev/prod) with different sync policies.
+- Self‑healing concept demonstrated and documented.
+
+The exercise proves the GitOps principle: Git as single source of truth, ArgoCD ensures cluster state convergence.
diff --git a/app_python/docs/LAB14.md b/app_python/docs/LAB14.md
new file mode 100644
index 0000000000..b63f19ca15
--- /dev/null
+++ b/app_python/docs/LAB14.md
@@ -0,0 +1,183 @@
+# Lab 14 — Progressive Delivery with Argo Rollouts
+
+**Name:** Diana Yakupova
+**Group:** B23-CBS-02
+**Date:** 2026-04-30
+
+## Task 1 — Argo Rollouts Fundamentals
+
+I installed the Argo Rollouts controller and the kubectl plugin, and deployed the dashboard.
+
+```bash
+kubectl create namespace argo-rollouts
+kubectl apply -n argo-rollouts -f https://github.com/argoproj/argo-rollouts/releases/latest/download/install.yaml
+brew install argoproj/tap/kubectl-argo-rollouts
+kubectl apply -n argo-rollouts -f https://github.com/argoproj/argo-rollouts/releases/latest/download/dashboard-install.yaml
+kubectl port-forward svc/argo-rollouts-dashboard -n argo-rollouts 3100:3100
+```
+
+All pods became `Running`. The dashboard was accessible at `http://localhost:3100`.
+
+
+
+
+**Key understanding:**
+A `Rollout` is a CRD that extends `Deployment` with progressive delivery strategies (canary, blue‑green). It adds the `strategy` field but keeps the same pod template and selector.
+
+## Task 2 — Canary Deployment
+
+I converted my Helm‑managed `Deployment` into a `Rollout` with a canary strategy. The `templates/rollout.yaml` was created from the existing deployment template and the strategy section was added:
+
+```yaml
+strategy:
+ canary:
+ steps:
+ - setWeight: 20
+ - pause: {} # manual promotion
+ - setWeight: 40
+ - pause: { duration: 30s }
+ - setWeight: 60
+ - pause: { duration: 30s }
+ - setWeight: 80
+ - pause: { duration: 30s }
+ - setWeight: 100
+```
+
+I removed the pre‑install and post‑install jobs to avoid blocking, and disabled persistence to keep things simple.
+
+```bash
+rm app_python/k8s/my-python-app/templates/pre-install-job.yaml
+rm app_python/k8s/my-python-app/templates/post-install-job.yaml
+helm upgrade --install my-release ./app_python/k8s/my-python-app \
+ --set image.repository=nginx,image.tag=latest,image.pullPolicy=IfNotPresent \
+ --set persistence.enabled=false \
+ --set service.targetPort=80
+```
+
+The rollout became `Healthy` with 3 replicas running nginx.
+
+
+
+### Canary update test
+
+I simulated a new version by changing the image tag to `second-tag`:
+
+```bash
+helm upgrade --install my-release ./app_python/k8s/my-python-app \
+ --set image.repository=nginx,image.tag=second-tag,image.pullPolicy=IfNotPresent \
+ --set persistence.enabled=false \
+ --set service.targetPort=80
+```
+
+Argo Rollouts created a new ReplicaSet (canary) with 20% of the traffic and paused. I manually promoted:
+
+```bash
+kubectl argo rollouts promote my-release-my-python-app
+```
+
+The rollout automatically proceeded through the remaining steps (40% → 60% → 80% → 100%). After completion, all pods were running the new image.
+
+```bash
+kubectl argo rollouts get rollout my-release-my-python-app
+```
+
+
+
+I verified the application works:
+
+```bash
+kubectl port-forward svc/my-release-my-python-app 8080:80
+# opening http://localhost:8080 showed the nginx welcome page
+```
+
+
+
+## Task 3 — Blue‑Green Deployment
+
+I implemented a separate blue‑green Rollout using the same Helm chart but with a different values file and manually defined blue‑green resources.
+
+**Blue‑Green Rollout (`rollout-bluegreen.yaml`):**
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Rollout
+metadata:
+ name: python-app-bg
+spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: python-app-bg
+ template:
+ metadata:
+ labels:
+ app: python-app-bg
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:latest
+ ports:
+ - containerPort: 80
+ strategy:
+ blueGreen:
+ activeService: python-app-bg-active
+ previewService: python-app-bg-preview
+ autoPromotionEnabled: false
+```
+
+**Services** (`services-bluegreen.yaml`): created active and preview services.
+
+I applied both:
+
+```bash
+kubectl apply -f services-bluegreen.yaml
+kubectl apply -f rollout-bluegreen.yaml
+```
+
+Initial state – both active and preview served the same stable version.
+
+### Blue‑Green update
+
+I patched the rollout to change the image to `nginx:1.25`:
+
+```bash
+kubectl patch rollout python-app-bg --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "nginx:1.25"}]'
+```
+
+Argo Rollouts created a new ReplicaSet (green). The new pods were only reachable through the preview service, while the active service continued to serve the old version.
+
+I tested the preview version:
+
+```bash
+kubectl port-forward svc/python-app-bg-preview 8081:80
+# http://localhost:8081 showed nginx 1.25
+```
+
+
+
+Then I promoted the green version:
+
+```bash
+kubectl argo rollouts promote python-app-bg
+```
+
+The active service instantly switched to the new version. The old replicas were scaled down.
+
+
+
+## Task 4 — Strategy Comparison & Documentation
+
+| Feature | Canary | Blue‑Green |
+|---------|--------|------------|
+| Traffic shift | Gradual (percentage steps) | Instant (all‑or‑nothing) |
+| Duration | Minutes (sum of steps) | Seconds (after promotion) |
+| Rollback | Abort or rollback to previous weight | Instant (switch active service) |
+| Resource usage | No extra pods (shared) | 2× pods during deployment |
+| Best for | Risk‑averse, validation with real traffic | Fast, atomic deploys with easy rollback |
+
+**What I learned:**
+- Canary is perfect for slowly exposing a new version to users while monitoring metrics.
+- Blue‑green gives a zero‑downtime atomic switch, ideal when you have spare capacity.
+- Argo Rollouts makes both strategies declarative and integrates seamlessly with existing Kubernetes services.
+
+All tasks completed successfully. I can now safely roll out updates to my applications using progressive delivery.
\ No newline at end of file
diff --git a/app_python/docs/LAB15.md b/app_python/docs/LAB15.md
new file mode 100644
index 0000000000..f8da0a0f59
--- /dev/null
+++ b/app_python/docs/LAB15.md
@@ -0,0 +1,131 @@
+# Lab 15 — StatefulSets & Persistent Storage
+
+**Name:** Diana Yakupova
+**Group:** B23-CBS-02
+**Date:** 2026-05-07
+
+## Task 1 — StatefulSet Concepts
+
+A StatefulSet guarantees:
+- stable, unique network identifiers (`app-0`, `app-1`, `app-2`),
+- persistent storage per pod (each pod gets its own PVC),
+- ordered, graceful deployment and scaling.
+
+**Headless Service** (`clusterIP: None`) is required – it creates DNS records for each pod of the StatefulSet.
+
+## Task 2 — Convert Deployment to StatefulSet
+
+I extended the Helm chart with:
+
+- `templates/statefulset.yaml` – included `volumeClaimTemplates` for per‑pod storage.
+- `templates/headless-service.yaml` – a service with `clusterIP: None`.
+
+The `values.yaml` gained a `persistence` section:
+
+```yaml
+persistence:
+ enabled: true
+ size: 100Mi
+ storageClass: ""
+```
+
+I deployed the StatefulSet:
+
+```bash
+helm upgrade --install stateful-app /Users/dianayakupova/Study/DevOps-Core-Course/app_python/k8s/my-python-app --reset-values --set image.tag=local --set image.pullPolicy=IfNotPresent --wait --timeout 5m
+```
+
+After a short wait all three pods became `Running`.
+
+```bash
+kubectl get po,sts,svc,pvc
+```
+
+
+The output shows three pods (`*-0`, `*-1`, `*-2`) and three dedicated PVCs (one per pod).
+
+## Task 3 — Headless Service & Pod Identity
+
+**Headless service** is correctly configured:
+
+```bash
+kubectl get svc stateful-app-my-python-app-headless
+```
+
+
+`CLUSTER-IP` is `None`, proving it's a headless service.
+
+**DNS resolution:** from pod‑0 I resolved pod‑1 by its stable DNS name:
+
+```bash
+kubectl exec stateful-app-my-python-app-stateful-0 -- python -c \
+ 'import socket; print(socket.gethostbyname("stateful-app-my-python-app-stateful-1.stateful-app-my-python-app-headless.default.svc.cluster.local"))'
+```
+
+The returned IP address confirms the DNS name works.
+
+**Per‑pod storage isolation:** I sent a different number of requests to each pod:
+
+```bash
+# pod-0: 3 requests, pod-1: 5 requests, pod-2: 0 requests
+for i in 1 2 3; do curl -s http://localhost:8080/ > /dev/null; done
+for i in 1 2 3 4 5; do curl -s http://localhost:8081/ > /dev/null; done
+```
+
+Then I read the `/visits` endpoint from each pod (using `kubectl exec` to bypass the Service):
+
+```bash
+for i in 0 1 2; do echo "=== pod-$i ==="; kubectl exec stateful-app-my-python-app-stateful-$i -- python -c 'import urllib.request as u; print(u.urlopen("http://127.0.0.1:5000/visits").read().decode().strip())'; printf "file: "; kubectl exec stateful-app-my-python-app-stateful-$i -- sh -c 'cat /data/visits; echo'; done
+```
+
+
+Results: each pod has its own independent counter – **isolation confirmed**.
+
+**Persistence test:** I deleted pod‑0 and waited for the StatefulSet to recreate it.
+
+```bash
+kubectl delete pod stateful-app-my-python-app-stateful-0
+kubectl get pods -w # wait until new pod is Running
+```
+
+After the new pod appeared, I checked its counter again:
+
+```bash
+kubectl exec stateful-app-my-python-app-stateful-0 -- \
+ python -c 'import urllib.request as u; print(u.urlopen("http://127.0.0.1:5000/visits").read().decode())'
+```
+
+The value remained **3** – the data survived pod deletion because the PVC was re‑attached to the new pod.
+
+## Task 4 — Documentation (this file)
+
+All core requirements fulfilled:
+- StatefulSet with `volumeClaimTemplates`
+- Headless service with stable DNS
+- Per‑pod storage isolation (different counters)
+- Data persistence across pod restarts
+
+## Bonus Task — Update Strategies (optional)
+
+I explored StatefulSet update strategies. By default the update strategy is `RollingUpdate`. I configured a **partitioned rolling update**:
+
+```bash
+helm upgrade --install stateful-app ./app_python/k8s/my-python-app \
+ --set statefulset.updateStrategy.type=RollingUpdate \
+ --set statefulset.updateStrategy.partition=2 \
+ --set image.tag=local ...
+```
+
+With `partition: 2`, only pods with ordinal ≥ 2 will be updated when the pod template changes. Pods 0 and 1 are left untouched. This allows canary‑like updates inside a StatefulSet.
+
+```bash
+kubectl get statefulset stateful-app-my-python-app-stateful -o jsonpath='{.spec.updateStrategy}'
+```
+
+
+The `OnDelete` strategy was also tested – pods are updated only when manually deleted. Both strategies are valuable for production stateful workloads.
+
+## Conclusion
+
+StatefulSet successfully provides stable identities, per‑pod persistent storage, ordered operations, and configurable update strategies. All tests passed, and the application behaves correctly with isolated per‑pod state.
+
diff --git a/app_python/docs/LAB16.md b/app_python/docs/LAB16.md
new file mode 100644
index 0000000000..17910533a3
--- /dev/null
+++ b/app_python/docs/LAB16.md
@@ -0,0 +1,398 @@
+# Lab 16 — Kubernetes Monitoring & Init Containers
+
+**Name:** Diana Yakupova
+**Group:** B23-CBS-02
+**Date:** 2026-05-12
+
+## Task 1 — Kube-Prometheus Stack
+
+I installed and explored the complete Kube-Prometheus stack on my Kubernetes cluster. This is the go-to solution for production-grade monitoring of Kubernetes clusters.
+
+### Understanding the Components
+
+1. **Prometheus Operator** — A Kubernetes controller that watches for `Prometheus`, `Alertmanager`, and `ServiceMonitor` custom resources and automatically creates the corresponding pods and services. This declarative approach eliminates the need to manually manage Prometheus configuration files.
+
+2. **Prometheus** — The core time-series database. It scrapes metrics from targets using a pull model. Stores metrics with rich labeling for querying. Evaluates alert rules and has built-in Kubernetes service discovery.
+
+3. **Alertmanager** — Handles alert routing and grouping. Deduplicates alert instances, manages silences, and routes to notification channels (email, Slack, PagerDuty). It's the glue between Prometheus alerts and human notification systems.
+
+4. **Grafana** — Beautiful visualization platform with pre-built Kubernetes dashboards. Queries Prometheus backend and provides rich templating for dynamic dashboards that adapt to cluster size.
+
+5. **kube-state-metrics** — Exports Kubernetes API object state as metrics. Watches Deployments, StatefulSets, Pods, Nodes, and generates metrics like `kube_pod_info`, `kube_deployment_replicas_ready`. Enables Prometheus to understand Kubernetes resources.
+
+6. **node-exporter** — Runs as a DaemonSet on each node. Exports system-level metrics: CPU, memory, disk I/O, network stats. Essential for understanding node health and resource utilization.
+
+### Installation via Helm
+
+I added the prometheus-community Helm repository and installed the complete stack:
+
+```bash
+helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+helm repo update
+```
+
+Installed the kube-prometheus-stack:
+
+```bash
+helm install monitoring prometheus-community/kube-prometheus-stack \
+ --namespace monitoring \
+ --create-namespace \
+ --wait --timeout 5m
+```
+
+### Verification — All Components Running
+
+After installation, all pods came up successfully:
+
+```bash
+kubectl get pods -n monitoring
+```
+
+**Output:**
+
+
+
+All six pods are Running — stack fully operational.
+
+**Services:**
+
+```bash
+kubectl get svc -n monitoring
+```
+
+All services deployed with ClusterIP addresses enabling inter-pod communication.
+
+---
+
+## Task 2 — Grafana Dashboard Exploration
+
+I accessed Grafana and used the pre-built dashboards to answer questions about my cluster.
+
+### Accessing Grafana
+
+Port-forward to Grafana service:
+
+```bash
+kubectl port-forward svc/monitoring-grafana -n monitoring 3000:80
+```
+
+**Login Credentials:**
+
+- Username: `admin`
+- Password: (retrieved from secret)
+
+
+
+### Dashboard Questions — All Answered
+
+#### 1. Pod Resources: CPU/Memory of StatefulSet
+
+Using dashboard: **"Kubernetes / Compute Resources / Pod"**
+
+CPU usage per pod: **~5-10 milli-cores** (baseline, no production traffic)
+Memory usage per pod: **~15-25 MB** (normal container overhead)
+
+#### 2. Namespace Analysis: CPU and Memory Distribution
+
+Using dashboard: **"Kubernetes / Compute Resources / Namespace (Pods)"**
+
+**Ranking by CPU usage:**
+
+1. `monitoring` namespace — ~40-60 milli-cores (Prometheus retention, Grafana memory)
+2. `kube-system` — ~15-20 milli-cores (API server, controller manager)
+3. `default` — ~5-10 milli-cores (minimal demo pods)
+
+#### 3. Node Metrics: Memory Usage and CPU Cores
+
+Using dashboard: **"Node Exporter / Nodes"**
+
+- **Total Memory:** 2048 MB (2 GB)
+- **Used Memory:** ~1200 MB (~60%)
+- **Available:** ~800 MB (~40%)
+- **CPU Cores:** 4 cores (ARM64)
+- **CPU Usage:** ~10-15% under monitoring load
+
+#### 4. Kubelet: Pods and Containers Managed
+
+Using dashboard: **"Kubernetes / Kubelet"**
+
+- **Total Pods Running:** 11 pods
+- **Total Containers:** ~15-16
+- **Pod Capacity:** 110 pods (minikube limit)
+- **Current Utilization:** 10% pod capacity
+- **Container Runtime:** containerd
+
+#### 5. Network: Traffic for Default Namespace Pods
+
+Using Prometheus query: `sum by (pod) (rate(container_network_receive_bytes_total{namespace="default"}[1m]))`
+
+- **Inbound traffic:** < 100 bytes/sec (DNS and service discovery)
+- **Outbound traffic:** < 100 bytes/sec (query responses)
+- **Primary pattern:** Service-to-service discovery calls only
+
+#### 6. Alerts: Active and Pending
+
+Accessed Alertmanager UI at `http://localhost:9093`
+
+- **Active Alerts:** 0
+- **Pending Alerts:** 0
+- **Alert Groups:** None
+- **Silenced:** None
+
+**Cluster is healthy** — no resource exhaustion, no failures.
+
+
+
+---
+
+## Task 3 — Init Containers Implementation
+
+I implemented two common init container patterns to demonstrate pod initialization patterns.
+
+### Pattern 1: File Download via Init Container
+
+**Purpose:** Initialize shared volume with external data before main app starts.
+
+**File:** `k8s/init-download.yaml`
+
+```yaml
+spec:
+ initContainers:
+ - name: init-download
+ image: busybox:1.36
+ command:
+ ["sh", "-c", "wget -O /work-dir/index.html https://www.example.com"]
+ volumeMounts:
+ - name: workdir
+ mountPath: /work-dir
+ containers:
+ - name: main-app
+ volumeMounts:
+ - name: workdir
+ mountPath: /data
+ volumes:
+ - name: workdir
+ emptyDir: {}
+```
+
+**Deployment:**
+
+```bash
+kubectl apply -f k8s/init-download.yaml
+```
+
+**Verification:**
+
+Pod reached Running state in seconds:
+
+```bash
+$ kubectl get pods init-download-demo
+NAME READY STATUS RESTARTS AGE
+init-download-demo 1/1 Running 0 20s
+```
+
+File successfully exists in shared volume:
+
+```bash
+$ kubectl exec init-download-demo -- ls -la /data/
+-rw-r--r-- 1 root root 528 May 12 15:39 index.html
+
+$ kubectl exec init-download-demo -- head -3 /data/index.html
+<!doctype html>
+<html>
+<head>
+```
+
+**Result:** Init container downloaded file. Main container can access it from shared volume.
+
+### Pattern 2: Wait-for-Service Init Container
+
+**Purpose:** Delay main container startup until dependency service is ready.
+
+**File:** `k8s/init-wait-for-service.yaml`
+
+```yaml
+spec:
+  initContainers:
+    - name: wait-for-service
+      image: busybox:1.36
+      command:
+        [
+          "sh",
+          "-c",
+          "echo 'Waiting for monitoring-grafana service...'; until wget -q -O- http://monitoring-grafana.monitoring:80 > /dev/null 2>&1; do sleep 2; done; echo 'Service is ready!'",
+        ]
+  containers:
+    - name: main-app
+      image: busybox:1.36
+      command: ["sh", "-c", 'echo "Main container started! Service dependency satisfied."; sleep 3600']
+```
+
+**Deployment:**
+
+```bash
+kubectl apply -f k8s/init-wait-for-service.yaml
+```
+
+**Verification:**
+
+Pod immediately reached Running (init container quickly verified Grafana service):
+
+```bash
+$ kubectl get pods init-wait-service-demo
+NAME READY STATUS RESTARTS AGE
+init-wait-service-demo 1/1 Running 0 10s
+```
+
+Init container verified service and completed:
+
+```bash
+$ kubectl logs init-wait-service-demo -c wait-for-service
+Waiting for monitoring-grafana service...
+Service is ready!
+
+$ kubectl logs init-wait-service-demo -c main-app
+Main container started! Service dependency satisfied.
+```
+
+**Result:** Init container waited for Grafana, verified it was reachable, then allowed main container to start.
+
+---
+
+## Task 4 — Documentation
+
+This file documents all components, installation steps, verification outputs, dashboard answers, and init container implementations.
+
+## Bonus Task — Custom Metrics & ServiceMonitor
+
+I added Prometheus metrics to my Python application and configured ServiceMonitor for automatic scraping by Prometheus.
+
+### Metrics Already in App
+
+My Flask application already has comprehensive Prometheus instrumentation in `app_python/app.py`:
+
+```python
+from prometheus_client import Counter, Histogram, Gauge, generate_latest, REGISTRY
+
+# Define custom metrics
+REQUEST_COUNT = Counter(
+ 'http_requests_total',
+ 'Total HTTP requests',
+ ['method', 'endpoint', 'status']
+)
+
+REQUEST_DURATION = Histogram(
+ 'http_request_duration_seconds',
+ 'HTTP request duration in seconds',
+ ['method', 'endpoint']
+)
+
+REQUESTS_IN_PROGRESS = Gauge(
+ 'http_requests_in_progress',
+ 'HTTP requests currently being processed'
+)
+
+ENDPOINT_CALLS = Counter(
+ 'devops_info_endpoint_calls',
+ 'Calls to specific endpoints',
+ ['endpoint']
+)
+
+@app.route('/metrics')
+def metrics():
+ return generate_latest(REGISTRY), 200, {'Content-Type': 'text/plain; version=0.0.4'}
+
+@app.before_request
+def before_request():
+ REQUESTS_IN_PROGRESS.inc()
+
+@app.after_request
+def after_request(response):
+ REQUESTS_IN_PROGRESS.dec()
+ duration = datetime.now(timezone.utc) - request._start_time
+ REQUEST_DURATION.labels(method=request.method, endpoint=request.endpoint or request.path).observe(duration.total_seconds())
+ REQUEST_COUNT.labels(method=request.method, endpoint=request.endpoint or request.path, status=response.status_code).inc()
+ return response
+```
+
+### Verify Metrics Endpoint
+
+The `/metrics` endpoint exposes all metrics:
+
+```bash
+kubectl port-forward svc/devops-info-service 8888:80
+curl http://localhost:8888/metrics | grep "^http_requests"
+```
+
+**Output shows custom metrics:**
+
+```
+# HELP http_requests_total Total HTTP requests
+# TYPE http_requests_total counter
+http_requests_total{endpoint="health_check",method="GET",status="200"} 7863.0
+http_requests_total{endpoint="metrics",method="GET",status="200"} 3.0
+
+# HELP http_request_duration_seconds HTTP request duration in seconds
+# TYPE http_request_duration_seconds histogram
+
+# HELP http_requests_in_progress HTTP requests currently being processed
+# TYPE http_requests_in_progress gauge
+http_requests_in_progress 1.0
+```
+
+### Create ServiceMonitor CRD
+
+Created `app_python/k8s/servicemonitor.yaml` to configure Prometheus scraping:
+
+```yaml
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: devops-info-service-monitor
+ namespace: monitoring
+ labels:
+ release: monitoring
+spec:
+ namespaceSelector:
+ matchNames:
+ - default
+ selector:
+ matchLabels:
+ app: devops-info-service
+ endpoints:
+ - port: http
+ path: /metrics
+ interval: 30s
+```
+
+**Deploy:**
+
+```bash
+kubectl apply -f k8s/servicemonitor.yaml
+kubectl get servicemonitor -n monitoring devops-info-service-monitor
+```
+
+### Result
+
+**Metrics Endpoint:** Working and exposing all custom metrics
+**ServiceMonitor:** Created and ready for Prometheus scraping
+**Instrumentation:** HTTP request tracking (count, duration, in-progress gauge)
+**Labels:** Rich metrics with method, endpoint, and status codes
+
+The application now provides complete observability for request-level metrics. Once Prometheus successfully scrapes the ServiceMonitor targets, these metrics will be queryable in Prometheus UI and plottable in Grafana dashboards.
+
+---
+
+## What I Learned
+
+- **Kube-Prometheus Stack** is the industry standard for Kubernetes monitoring. The declarative CRD approach (Prometheus, Alertmanager, ServiceMonitor) makes configuration reproducible and GitOps-friendly. Helm charts handle the complexity of managing multiple components.
+
+- **Grafana dashboards** provide immediate visibility into cluster health. The pre-built dashboards for Kubernetes are professional-grade. I can answer infrastructure questions just by reading dashboards — no CLI expertise needed.
+
+- **Init containers** are powerful for pod setup workflows. The file download pattern handles data initialization. The wait-for-service pattern ensures proper startup ordering of dependent services. They make pods self-healing and maintainable.
+
+- **Prometheus metrics** are essential for production observability. Custom application metrics (request count, duration) enable data-driven performance analysis. ServiceMonitor CRD makes metric scraping automatic — no manual Prometheus scrape config needed.
+
+- **Monitoring is not optional** in production. The Kube-Prometheus stack provides enterprise-grade observability out of the box. If you can't measure it, you can't improve it.
+
+All tasks completed successfully. I can now deploy, monitor, and troubleshoot Kubernetes clusters confidently.
diff --git a/app_python/docs/LAB17.md b/app_python/docs/LAB17.md
new file mode 100644
index 0000000000..cf296e9255
--- /dev/null
+++ b/app_python/docs/LAB17.md
@@ -0,0 +1,437 @@
+# Lab 17 — Cloudflare Workers Edge Deployment
+
+**Name:** Diana Yakupova
+**Group:** B23-CBS-02
+**Date:** 2026-05-13
+
+## Task 1 — Cloudflare Setup (3 pts)
+
+I created a Cloudflare Workers project and authenticated via Wrangler CLI.
+
+First, I created the project using C3 (create-cloudflare):
+
+```bash
+npm create cloudflare@latest -- edge-api
+cd app_python/edge-api
+```
+
+I selected: Hello World template, Worker only, TypeScript, and enabled Git.
+
+Then I authenticated with Cloudflare:
+
+```bash
+npx wrangler login
+npx wrangler whoami
+```
+
+
+
+The project structure includes:
+
+- `src/index.ts` – Worker code in TypeScript
+- `wrangler.jsonc` – configuration (vars, secrets, KV namespaces, compatibility_date)
+- `package.json` – dependencies (wrangler, @cloudflare/workers-types)
+
+I understand the key concepts:
+
+- **Workers Runtime** – V8 JavaScript engine at the edge, no server management needed
+- **workers.dev** – automatic public subdomain for every Cloudflare account
+- **Bindings** – mechanism to expose environment variables, secrets, and KV namespaces to Worker code
+
+## Task 2 — Build and Deploy a Worker API (4 pts)
+
+I implemented an API with multiple endpoints in `src/index.ts`:
+
+```typescript
+export default {
+ async fetch(request: Request, env: Env): Promise<Response> {
+ const url = new URL(request.url);
+
+ if (url.pathname === "/health") {
+ return Response.json({
+ status: "ok",
+ timestamp: new Date().toISOString(),
+ });
+ }
+
+ if (url.pathname === "/") {
+ return Response.json({
+ app: env.APP_NAME,
+ version: env.VERSION,
+ course: env.COURSE_NAME,
+ message: "Hello from Cloudflare Workers Edge",
+ timestamp: new Date().toISOString(),
+ });
+ }
+
+ if (url.pathname === "/app-info") {
+ return Response.json({
+ app: env.APP_NAME,
+ version: env.VERSION,
+ environment: "production",
+ runtime: "cloudflare-workers",
+ });
+ }
+
+ if (url.pathname === "/edge") {
+ return Response.json({
+ colo: request.cf?.colo,
+ country: request.cf?.country,
+ city: request.cf?.city,
+ asn: request.cf?.asn,
+ httpProtocol: request.cf?.httpProtocol,
+ tlsVersion: request.cf?.tlsVersion,
+ });
+ }
+
+ if (url.pathname === "/counter") {
+ const raw = await env.SETTINGS.get("visits");
+ const visits = Number(raw ?? "0") + 1;
+ await env.SETTINGS.put("visits", String(visits));
+ return Response.json({ visits });
+ }
+
+ return new Response("Not found", { status: 404 });
+ },
+};
+```
+
+### Local Testing
+
+I started the local dev server:
+
+```bash
+npm install
+npx wrangler dev
+```
+
+Tested all endpoints in another terminal:
+
+```bash
+curl http://localhost:8787/
+curl http://localhost:8787/health
+curl http://localhost:8787/app-info
+curl http://localhost:8787/edge
+curl http://localhost:8787/counter
+```
+
+
+
+### Deployment to Production
+
+I deployed the Worker to Cloudflare:
+
+```bash
+npx wrangler deploy
+```
+
+Public URL:
+
+```
+https://edge-api.diana-devops-lab17.workers.dev
+```
+
+I tested the public endpoint:
+
+```bash
+curl https://edge-api.diana-devops-lab17.workers.dev
+```
+
+---
+
+## Task 3 — Global Edge Behavior (4 pts)
+
+I implemented the `/edge` endpoint that returns request metadata from the edge:
+
+```typescript
+if (url.pathname === "/edge") {
+ return Response.json({
+ colo: request.cf?.colo,
+ country: request.cf?.country,
+ city: request.cf?.city,
+ asn: request.cf?.asn,
+ httpProtocol: request.cf?.httpProtocol,
+ tlsVersion: request.cf?.tlsVersion,
+ });
+}
+```
+
+From the public URL it works:
+
+```bash
+curl https://edge-api.diana-devops-lab17.workers.dev/edge
+```
+
+
+
+### Global Distribution
+
+Workers automatically replicates to 250+ Cloudflare data centers worldwide. When a request arrives, it's served by the nearest server.
+
+Unlike Kubernetes where you choose regions and deploy separately to each, with Workers:
+
+- **One deploy** = available everywhere
+- **No region selection** = automatic distribution
+- **No failover config** = Cloudflare handles it
+
+### Routing Concepts
+
+**workers.dev** provides a public URL immediately. **Routes** attach Workers to existing Cloudflare zones. **Custom Domains** make your Worker the origin for your domain. I used `workers.dev` for this lab.
+
+---
+
+## Task 4 — Configuration, Secrets & Persistence (3 pts)
+
+I added variables, secrets, and KV namespace to `wrangler.jsonc`:
+
+```jsonc
+{
+ "vars": {
+ "APP_NAME": "edge-api",
+ "COURSE_NAME": "DevOps-Core",
+ "VERSION": "1.0.0",
+ },
+}
+```
+
+I used them in the code:
+
+```typescript
+if (url.pathname === "/app-info") {
+ return Response.json({
+ app: env.APP_NAME,
+ version: env.VERSION,
+ course: env.COURSE_NAME,
+ });
+}
+```
+
+I created secrets:
+
+```bash
+npx wrangler secret put API_TOKEN
+npx wrangler secret put ADMIN_EMAIL
+```
+
+
+
+### KV Persistence
+
+I created a KV namespace:
+
+```bash
+npx wrangler kv namespace create SETTINGS
+```
+
+I added the ID to `wrangler.jsonc`:
+
+```jsonc
+{
+ "kv_namespaces": [
+ {
+ "binding": "SETTINGS",
+ "id": "",
+ },
+ ],
+}
+```
+
+I implemented a counter at the `/counter` endpoint:
+
+```typescript
+if (url.pathname === "/counter") {
+ const raw = await env.SETTINGS.get("visits");
+ const visits = Number(raw ?? "0") + 1;
+ await env.SETTINGS.put("visits", String(visits));
+ return Response.json({ visits });
+}
+```
+
+### Persistence Verification
+
+After deploying, I called `/counter` multiple times:
+
+```bash
+curl https://edge-api.diana-devops-lab17.workers.dev/counter
+# {"visits":1}
+curl https://edge-api.diana-devops-lab17.workers.dev/counter
+# {"visits":2}
+```
+
+Then I redeployed (without changes to counter):
+
+```bash
+npx wrangler deploy
+```
+
+I verified the counter continued counting:
+
+```bash
+curl https://edge-api.diana-devops-lab17.workers.dev/counter
+# {"visits":3} <- counter persisted!
+```
+
+
+
+---
+
+## Task 5 — Observability & Operations (3 pts)
+
+I added console.log statements to the Worker code:
+
+```typescript
+console.log("incoming request", {
+ method: request.method,
+ path: url.pathname,
+ colo: request.cf?.colo,
+ country: request.cf?.country,
+});
+```
+
+Local logs appear in the dev server:
+
+```bash
+npx wrangler dev
+```
+
+
+
+For production logs I used:
+
+```bash
+npx wrangler tail
+```
+
+
+
+**Screenshot:**
+
+- Run `npx wrangler tail`
+- Call an endpoint with curl
+- Show real-time logs from production
+
+### Metrics
+
+I viewed metrics in the Cloudflare dashboard:
+
+1. Navigate to https://dash.cloudflare.com
+2. Workers → edge-api → Metrics tab
+
+
+
+### Deployments and Rollback
+
+I viewed deployment history:
+
+```bash
+npx wrangler deployments list
+```
+
+For rollback I made a new deploy and rolled back:
+
+```bash
+npx wrangler deploy
+npx wrangler rollback
+```
+
+
+
+---
+
+## Task 6 — Documentation & Comparison (3 pts)
+
+### URLs and Routes
+
+My Worker is available at:
+
+```
+https://edge-api.diana-devops-lab17.workers.dev
+```
+
+Implemented endpoints:
+
+- `GET /` – application information
+- `GET /health` – health check
+- `GET /app-info` – deployment metadata
+- `GET /edge` – geographic location, TLS version, protocol info
+- `GET /counter` – persistent counter in KV
+- `GET /admin` – endpoint protected with secrets
+
+### Kubernetes vs Cloudflare Workers
+
+Comparing my earlier Kubernetes deployment (Lab 15) with Workers:
+
+| Aspect | Kubernetes | Cloudflare Workers |
+| -------------------- | ----------------------------------------------------- | -------------------------------------------- |
+| **Setup** | Cluster, networking, storage | Account + CLI, ready immediately |
+| **Deployment** | Wait 5-15 minutes for pod scheduling | Seconds, deployed everywhere instantly |
+| **Regions** | Choose manually (us-east, eu-west) and deploy to each | One deploy = everywhere automatically |
+| **Cost (small app)** | Expensive (minimum cluster) | Free (100k requests/day) |
+| **Persistence** | StatefulSets + PVC | KV namespace (key-value) |
+| **Control** | Full (containers, network, storage) | Limited (JavaScript/TypeScript/Python only) |
+| **Best for** | Microservices, long-running apps | Edge APIs, webhooks, global request handlers |
+
+### What Was Easier Than Kubernetes:
+
+- No infrastructure management
+- One `npx wrangler deploy` = ready everywhere
+- No need to choose regions or manage failover
+- Simple logging via `wrangler tail`
+- Free tier actually useful
+
+### What Was More Constrained:
+
+- Only 30 seconds CPU time per request (long operations impossible)
+- Only KV for state (no database, no complex data structures)
+- Cannot install npm packages with native bindings
+- No filesystem (everything is ephemeral)
+- Compared to a Docker host, Workers is a completely different paradigm
+
+### My Takeaway:
+
+Workers is not "Kubernetes but simpler"—it's a completely different approach optimized for the edge. For global APIs and webhooks, it's ideal. But for microservices with long-running processes, Kubernetes remains the right choice.
+
+---
+
+## Local Testing Commands and Screenshots
+
+```bash
+# 1. Install dependencies
+cd edge-api
+npm install
+
+# 2. Start local server (Terminal 1)
+npx wrangler dev
+
+# 3. Test endpoints (Terminal 2)
+curl http://localhost:8787/
+curl http://localhost:8787/health
+curl http://localhost:8787/app-info
+curl http://localhost:8787/edge
+curl http://localhost:8787/counter
+
+# 4. Create KV namespace
+npx wrangler kv namespace create SETTINGS
+# Copy namespace ID and add to wrangler.jsonc
+
+# 5. Create secrets
+npx wrangler secret put API_TOKEN
+npx wrangler secret put ADMIN_EMAIL
+
+# 6. Deploy to production
+npx wrangler deploy
+
+# 7. Test public endpoints
+curl https://edge-api.diana-devops-lab17.workers.dev/health
+curl https://edge-api.diana-devops-lab17.workers.dev/edge
+curl https://edge-api.diana-devops-lab17.workers.dev/counter
+
+# 8. View production logs
+npx wrangler tail
+
+# 9. View deployment history
+npx wrangler deployments list
+
+# 10. Rollback to previous version
+npx wrangler rollback
+```
diff --git a/app_python/docs/build_output.txt b/app_python/docs/build_output.txt
new file mode 100644
index 0000000000..14684d2e93
--- /dev/null
+++ b/app_python/docs/build_output.txt
@@ -0,0 +1,332 @@
+#0 building with "desktop-linux" instance using docker driver
+
+#1 [internal] load build definition from Dockerfile
+#1 transferring dockerfile: 657B 0.0s done
+#1 DONE 0.1s
+
+#2 [internal] load metadata for docker.io/library/python:3.13-slim
+#2 DONE 9.5s
+
+#3 [internal] load .dockerignore
+#3 transferring context: 144B done
+#3 DONE 0.0s
+
+#4 [internal] load build context
+#4 transferring context: 3.91kB done
+#4 DONE 0.0s
+
+#5 [1/8] FROM docker.io/library/python:3.13-slim@sha256:2b9c9803c6a287cafa0a8c917211dddd23dcd2016f049690ee5219f5d3f1636e
+#5 resolve docker.io/library/python:3.13-slim@sha256:2b9c9803c6a287cafa0a8c917211dddd23dcd2016f049690ee5219f5d3f1636e 0.0s done
+#5 DONE 0.0s
+
+#5 [1/8] FROM docker.io/library/python:3.13-slim@sha256:2b9c9803c6a287cafa0a8c917211dddd23dcd2016f049690ee5219f5d3f1636e
+#5 sha256:97fc85b49690b12f13f53067a3190e231790ff42832ff5f39e97042fc4d4ede6 0B / 250B 0.2s
+#5 sha256:a6866fe8c3d2436d6a24f7d829aca8349726c5c198725f763a40e2e4263a53e6 0B / 11.72MB 0.2s
+#5 sha256:fe9a90620d58e0d94bd1a536412e60ddaff85c045f729197536cb8a382e1c5a2 0B / 1.27MB 0.2s
+#5 sha256:3ea009573b472d108af9af31ec35a06fe3649084f6611cf11f7d594b85cf7a7c 0B / 30.14MB 0.2s
+#5 sha256:a6866fe8c3d2436d6a24f7d829aca8349726c5c198725f763a40e2e4263a53e6 1.05MB / 11.72MB 0.6s
+#5 sha256:97fc85b49690b12f13f53067a3190e231790ff42832ff5f39e97042fc4d4ede6 250B / 250B 0.7s done
+#5 sha256:a6866fe8c3d2436d6a24f7d829aca8349726c5c198725f763a40e2e4263a53e6 2.10MB / 11.72MB 0.8s
+#5 sha256:a6866fe8c3d2436d6a24f7d829aca8349726c5c198725f763a40e2e4263a53e6 2.89MB / 11.72MB 1.4s
+#5 sha256:fe9a90620d58e0d94bd1a536412e60ddaff85c045f729197536cb8a382e1c5a2 1.27MB / 1.27MB 1.9s done
+#5 sha256:a6866fe8c3d2436d6a24f7d829aca8349726c5c198725f763a40e2e4263a53e6 4.19MB / 11.72MB 2.1s
+#5 sha256:3ea009573b472d108af9af31ec35a06fe3649084f6611cf11f7d594b85cf7a7c 2.10MB / 30.14MB 2.4s
+#5 sha256:a6866fe8c3d2436d6a24f7d829aca8349726c5c198725f763a40e2e4263a53e6 5.23MB / 11.72MB 2.7s
+#5 sha256:a6866fe8c3d2436d6a24f7d829aca8349726c5c198725f763a40e2e4263a53e6 6.29MB / 11.72MB 3.3s
+#5 sha256:3ea009573b472d108af9af31ec35a06fe3649084f6611cf11f7d594b85cf7a7c 4.19MB / 30.14MB 3.5s
+#5 sha256:a6866fe8c3d2436d6a24f7d829aca8349726c5c198725f763a40e2e4263a53e6 7.34MB / 11.72MB 4.1s
+#5 sha256:a6866fe8c3d2436d6a24f7d829aca8349726c5c198725f763a40e2e4263a53e6 8.39MB / 11.72MB 4.7s
+#5 sha256:3ea009573b472d108af9af31ec35a06fe3649084f6611cf11f7d594b85cf7a7c 6.29MB / 30.14MB 4.7s
+#5 sha256:a6866fe8c3d2436d6a24f7d829aca8349726c5c198725f763a40e2e4263a53e6 8.98MB / 11.72MB 5.1s
+#5 sha256:a6866fe8c3d2436d6a24f7d829aca8349726c5c198725f763a40e2e4263a53e6 10.49MB / 11.72MB 5.6s
+#5 sha256:3ea009573b472d108af9af31ec35a06fe3649084f6611cf11f7d594b85cf7a7c 8.33MB / 30.14MB 5.7s
+#5 sha256:a6866fe8c3d2436d6a24f7d829aca8349726c5c198725f763a40e2e4263a53e6 11.72MB / 11.72MB 6.2s
+#5 sha256:a6866fe8c3d2436d6a24f7d829aca8349726c5c198725f763a40e2e4263a53e6 11.72MB / 11.72MB 6.2s done
+#5 sha256:3ea009573b472d108af9af31ec35a06fe3649084f6611cf11f7d594b85cf7a7c 10.49MB / 30.14MB 6.6s
+#5 sha256:3ea009573b472d108af9af31ec35a06fe3649084f6611cf11f7d594b85cf7a7c 12.58MB / 30.14MB 7.1s
+#5 sha256:3ea009573b472d108af9af31ec35a06fe3649084f6611cf11f7d594b85cf7a7c 14.68MB / 30.14MB 7.7s
+#5 sha256:3ea009573b472d108af9af31ec35a06fe3649084f6611cf11f7d594b85cf7a7c 16.78MB / 30.14MB 8.3s
+#5 sha256:3ea009573b472d108af9af31ec35a06fe3649084f6611cf11f7d594b85cf7a7c 18.87MB / 30.14MB 8.9s
+#5 sha256:3ea009573b472d108af9af31ec35a06fe3649084f6611cf11f7d594b85cf7a7c 20.97MB / 30.14MB 9.3s
+#5 sha256:3ea009573b472d108af9af31ec35a06fe3649084f6611cf11f7d594b85cf7a7c 23.07MB / 30.14MB 9.9s
+#5 sha256:3ea009573b472d108af9af31ec35a06fe3649084f6611cf11f7d594b85cf7a7c 25.17MB / 30.14MB 10.5s
+#5 sha256:3ea009573b472d108af9af31ec35a06fe3649084f6611cf11f7d594b85cf7a7c 27.26MB / 30.14MB 11.1s
+#5 sha256:3ea009573b472d108af9af31ec35a06fe3649084f6611cf11f7d594b85cf7a7c 29.36MB / 30.14MB 11.6s
+#5 sha256:3ea009573b472d108af9af31ec35a06fe3649084f6611cf11f7d594b85cf7a7c 30.14MB / 30.14MB 11.8s done
+#5 extracting sha256:3ea009573b472d108af9af31ec35a06fe3649084f6611cf11f7d594b85cf7a7c
+#5 extracting sha256:3ea009573b472d108af9af31ec35a06fe3649084f6611cf11f7d594b85cf7a7c 4.5s done
+#5 DONE 16.3s
+
+#5 [1/8] FROM docker.io/library/python:3.13-slim@sha256:2b9c9803c6a287cafa0a8c917211dddd23dcd2016f049690ee5219f5d3f1636e
+#5 extracting sha256:fe9a90620d58e0d94bd1a536412e60ddaff85c045f729197536cb8a382e1c5a2
+#5 extracting sha256:fe9a90620d58e0d94bd1a536412e60ddaff85c045f729197536cb8a382e1c5a2 0.2s done
+#5 DONE 16.5s
+
+#5 [1/8] FROM docker.io/library/python:3.13-slim@sha256:2b9c9803c6a287cafa0a8c917211dddd23dcd2016f049690ee5219f5d3f1636e
+#5 extracting sha256:a6866fe8c3d2436d6a24f7d829aca8349726c5c198725f763a40e2e4263a53e6
+#5 extracting sha256:a6866fe8c3d2436d6a24f7d829aca8349726c5c198725f763a40e2e4263a53e6 2.3s done
+#5 DONE 18.9s
+
+#5 [1/8] FROM docker.io/library/python:3.13-slim@sha256:2b9c9803c6a287cafa0a8c917211dddd23dcd2016f049690ee5219f5d3f1636e
+#5 extracting sha256:97fc85b49690b12f13f53067a3190e231790ff42832ff5f39e97042fc4d4ede6 0.0s done
+#5 DONE 18.9s
+
+#6 [2/8] RUN apt-get update && apt-get install -y --no-install-recommends gcc ca-certificates && rm -rf /var/lib/apt/lists/*
+#6 8.017 Hit:1 http://deb.debian.org/debian trixie InRelease
+#6 8.082 Get:2 http://deb.debian.org/debian trixie-updates InRelease [47.3 kB]
+#6 8.194 Get:3 http://deb.debian.org/debian-security trixie-security InRelease [43.4 kB]
+#6 8.277 Get:4 http://deb.debian.org/debian trixie/main arm64 Packages [9607 kB]
+#6 10.68 Get:5 http://deb.debian.org/debian trixie-updates/main arm64 Packages [5404 B]
+#6 10.74 Get:6 http://deb.debian.org/debian-security trixie-security/main arm64 Packages [99.0 kB]
+#6 12.65 Fetched 9802 kB in 10s (977 kB/s)
+#6 12.65 Reading package lists...
+#6 13.29 Reading package lists...
+#6 13.93 Building dependency tree...
+#6 14.13 Reading state information...
+#6 14.47 ca-certificates is already the newest version (20250419).
+#6 14.47 The following additional packages will be installed:
+#6 14.47 binutils binutils-aarch64-linux-gnu binutils-common cpp cpp-14
+#6 14.47 cpp-14-aarch64-linux-gnu cpp-aarch64-linux-gnu gcc-14
+#6 14.47 gcc-14-aarch64-linux-gnu gcc-aarch64-linux-gnu libasan8 libatomic1
+#6 14.47 libbinutils libcc1-0 libctf-nobfd0 libctf0 libgcc-14-dev libgomp1
+#6 14.47 libgprofng0 libhwasan0 libisl23 libitm1 libjansson4 liblsan0 libmpc3
+#6 14.47 libmpfr6 libsframe1 libtsan2 libubsan1
+#6 14.47 Suggested packages:
+#6 14.47 binutils-doc gprofng-gui binutils-gold cpp-doc gcc-14-locales cpp-14-doc
+#6 14.47 gcc-multilib make manpages-dev autoconf automake libtool flex bison gdb
+#6 14.47 gcc-doc gcc-14-doc gdb-aarch64-linux-gnu
+#6 14.47 Recommended packages:
+#6 14.47 libc6-dev | libc-dev libc6-dev libc-dev
+#6 14.69 The following NEW packages will be installed:
+#6 14.69 binutils binutils-aarch64-linux-gnu binutils-common cpp cpp-14
+#6 14.69 cpp-14-aarch64-linux-gnu cpp-aarch64-linux-gnu gcc gcc-14
+#6 14.69 gcc-14-aarch64-linux-gnu gcc-aarch64-linux-gnu libasan8 libatomic1
+#6 14.69 libbinutils libcc1-0 libctf-nobfd0 libctf0 libgcc-14-dev libgomp1
+#6 14.69 libgprofng0 libhwasan0 libisl23 libitm1 libjansson4 liblsan0 libmpc3
+#6 14.69 libmpfr6 libsframe1 libtsan2 libubsan1
+#6 15.02 0 upgraded, 30 newly installed, 0 to remove and 0 not upgraded.
+#6 15.02 Need to get 45.2 MB of archives.
+#6 15.02 After this operation, 182 MB of additional disk space will be used.
+#6 15.02 Get:1 http://deb.debian.org/debian trixie/main arm64 libsframe1 arm64 2.44-3 [77.8 kB]
+#6 15.18 Get:2 http://deb.debian.org/debian trixie/main arm64 binutils-common arm64 2.44-3 [2509 kB]
+#6 15.65 Get:3 http://deb.debian.org/debian trixie/main arm64 libbinutils arm64 2.44-3 [660 kB]
+#6 15.82 Get:4 http://deb.debian.org/debian trixie/main arm64 libgprofng0 arm64 2.44-3 [668 kB]
+#6 16.00 Get:5 http://deb.debian.org/debian trixie/main arm64 libctf-nobfd0 arm64 2.44-3 [152 kB]
+#6 16.04 Get:6 http://deb.debian.org/debian trixie/main arm64 libctf0 arm64 2.44-3 [84.2 kB]
+#6 16.07 Get:7 http://deb.debian.org/debian trixie/main arm64 libjansson4 arm64 2.14-2+b3 [39.2 kB]
+#6 16.08 Get:8 http://deb.debian.org/debian trixie/main arm64 binutils-aarch64-linux-gnu arm64 2.44-3 [820 kB]
+#6 16.29 Get:9 http://deb.debian.org/debian trixie/main arm64 binutils arm64 2.44-3 [262 kB]
+#6 16.36 Get:10 http://deb.debian.org/debian trixie/main arm64 libisl23 arm64 0.27-1 [601 kB]
+#6 16.52 Get:11 http://deb.debian.org/debian trixie/main arm64 libmpfr6 arm64 4.2.2-1 [685 kB]
+#6 16.71 Get:12 http://deb.debian.org/debian trixie/main arm64 libmpc3 arm64 1.3.1-1+b3 [50.5 kB]
+#6 16.72 Get:13 http://deb.debian.org/debian trixie/main arm64 cpp-14-aarch64-linux-gnu arm64 14.2.0-19 [9169 kB]
+#6 19.17 Get:14 http://deb.debian.org/debian trixie/main arm64 cpp-14 arm64 14.2.0-19 [1276 B]
+#6 19.17 Get:15 http://deb.debian.org/debian trixie/main arm64 cpp-aarch64-linux-gnu arm64 4:14.2.0-1 [4832 B]
+#6 19.17 Get:16 http://deb.debian.org/debian trixie/main arm64 cpp arm64 4:14.2.0-1 [1568 B]
+#6 19.17 Get:17 http://deb.debian.org/debian trixie/main arm64 libcc1-0 arm64 14.2.0-19 [42.2 kB]
+#6 19.17 Get:18 http://deb.debian.org/debian trixie/main arm64 libgomp1 arm64 14.2.0-19 [124 kB]
+#6 19.22 Get:19 http://deb.debian.org/debian trixie/main arm64 libitm1 arm64 14.2.0-19 [24.2 kB]
+#6 19.22 Get:20 http://deb.debian.org/debian trixie/main arm64 libatomic1 arm64 14.2.0-19 [10.1 kB]
+#6 19.22 Get:21 http://deb.debian.org/debian trixie/main arm64 libasan8 arm64 14.2.0-19 [2578 kB]
+#6 19.90 Get:22 http://deb.debian.org/debian trixie/main arm64 liblsan0 arm64 14.2.0-19 [1161 kB]
+#6 20.22 Get:23 http://deb.debian.org/debian trixie/main arm64 libtsan2 arm64 14.2.0-19 [2383 kB]
+#6 20.85 Get:24 http://deb.debian.org/debian trixie/main arm64 libubsan1 arm64 14.2.0-19 [1039 kB]
+#6 21.13 Get:25 http://deb.debian.org/debian trixie/main arm64 libhwasan0 arm64 14.2.0-19 [1442 kB]
+#6 21.52 Get:26 http://deb.debian.org/debian trixie/main arm64 libgcc-14-dev arm64 14.2.0-19 [2359 kB]
+#6 22.16 Get:27 http://deb.debian.org/debian trixie/main arm64 gcc-14-aarch64-linux-gnu arm64 14.2.0-19 [17.7 MB]
+#6 26.86 Get:28 http://deb.debian.org/debian trixie/main arm64 gcc-14 arm64 14.2.0-19 [529 kB]
+#6 26.99 Get:29 http://deb.debian.org/debian trixie/main arm64 gcc-aarch64-linux-gnu arm64 4:14.2.0-1 [1440 B]
+#6 26.99 Get:30 http://deb.debian.org/debian trixie/main arm64 gcc arm64 4:14.2.0-1 [5136 B]
+#6 29.03 Fetched 45.2 MB in 12s (3687 kB/s)
+#6 29.08 Selecting previously unselected package libsframe1:arm64.
+#6 29.08 (Reading database ...
(Reading database ... 5%
(Reading database ... 10%
(Reading database ... 15%
(Reading database ... 20%
(Reading database ... 25%
(Reading database ... 30%
(Reading database ... 35%
(Reading database ... 40%
(Reading database ... 45%
(Reading database ... 50%
(Reading database ... 55%
(Reading database ... 60%
(Reading database ... 65%
(Reading database ... 70%
(Reading database ... 75%
(Reading database ... 80%
(Reading database ... 85%
(Reading database ... 90%
(Reading database ... 95%
(Reading database ... 100%
(Reading database ... 5643 files and directories currently installed.)
+#6 29.11 Preparing to unpack .../00-libsframe1_2.44-3_arm64.deb ...
+#6 29.12 Unpacking libsframe1:arm64 (2.44-3) ...
+#6 29.16 Selecting previously unselected package binutils-common:arm64.
+#6 29.16 Preparing to unpack .../01-binutils-common_2.44-3_arm64.deb ...
+#6 29.16 Unpacking binutils-common:arm64 (2.44-3) ...
+#6 29.50 Selecting previously unselected package libbinutils:arm64.
+#6 29.50 Preparing to unpack .../02-libbinutils_2.44-3_arm64.deb ...
+#6 29.51 Unpacking libbinutils:arm64 (2.44-3) ...
+#6 29.59 Selecting previously unselected package libgprofng0:arm64.
+#6 29.59 Preparing to unpack .../03-libgprofng0_2.44-3_arm64.deb ...
+#6 29.59 Unpacking libgprofng0:arm64 (2.44-3) ...
+#6 29.68 Selecting previously unselected package libctf-nobfd0:arm64.
+#6 29.68 Preparing to unpack .../04-libctf-nobfd0_2.44-3_arm64.deb ...
+#6 29.69 Unpacking libctf-nobfd0:arm64 (2.44-3) ...
+#6 29.71 Selecting previously unselected package libctf0:arm64.
+#6 29.72 Preparing to unpack .../05-libctf0_2.44-3_arm64.deb ...
+#6 29.72 Unpacking libctf0:arm64 (2.44-3) ...
+#6 29.75 Selecting previously unselected package libjansson4:arm64.
+#6 29.75 Preparing to unpack .../06-libjansson4_2.14-2+b3_arm64.deb ...
+#6 29.76 Unpacking libjansson4:arm64 (2.14-2+b3) ...
+#6 29.78 Selecting previously unselected package binutils-aarch64-linux-gnu.
+#6 29.78 Preparing to unpack .../07-binutils-aarch64-linux-gnu_2.44-3_arm64.deb ...
+#6 29.79 Unpacking binutils-aarch64-linux-gnu (2.44-3) ...
+#6 30.13 Selecting previously unselected package binutils.
+#6 30.13 Preparing to unpack .../08-binutils_2.44-3_arm64.deb ...
+#6 30.14 Unpacking binutils (2.44-3) ...
+#6 30.21 Selecting previously unselected package libisl23:arm64.
+#6 30.21 Preparing to unpack .../09-libisl23_0.27-1_arm64.deb ...
+#6 30.21 Unpacking libisl23:arm64 (0.27-1) ...
+#6 30.29 Selecting previously unselected package libmpfr6:arm64.
+#6 30.29 Preparing to unpack .../10-libmpfr6_4.2.2-1_arm64.deb ...
+#6 30.30 Unpacking libmpfr6:arm64 (4.2.2-1) ...
+#6 30.35 Selecting previously unselected package libmpc3:arm64.
+#6 30.35 Preparing to unpack .../11-libmpc3_1.3.1-1+b3_arm64.deb ...
+#6 30.35 Unpacking libmpc3:arm64 (1.3.1-1+b3) ...
+#6 30.37 Selecting previously unselected package cpp-14-aarch64-linux-gnu.
+#6 30.37 Preparing to unpack .../12-cpp-14-aarch64-linux-gnu_14.2.0-19_arm64.deb ...
+#6 30.37 Unpacking cpp-14-aarch64-linux-gnu (14.2.0-19) ...
+#6 31.22 Selecting previously unselected package cpp-14.
+#6 31.23 Preparing to unpack .../13-cpp-14_14.2.0-19_arm64.deb ...
+#6 31.23 Unpacking cpp-14 (14.2.0-19) ...
+#6 31.25 Selecting previously unselected package cpp-aarch64-linux-gnu.
+#6 31.25 Preparing to unpack .../14-cpp-aarch64-linux-gnu_4%3a14.2.0-1_arm64.deb ...
+#6 31.25 Unpacking cpp-aarch64-linux-gnu (4:14.2.0-1) ...
+#6 31.28 Selecting previously unselected package cpp.
+#6 31.28 Preparing to unpack .../15-cpp_4%3a14.2.0-1_arm64.deb ...
+#6 31.28 Unpacking cpp (4:14.2.0-1) ...
+#6 31.30 Selecting previously unselected package libcc1-0:arm64.
+#6 31.31 Preparing to unpack .../16-libcc1-0_14.2.0-19_arm64.deb ...
+#6 31.31 Unpacking libcc1-0:arm64 (14.2.0-19) ...
+#6 31.33 Selecting previously unselected package libgomp1:arm64.
+#6 31.33 Preparing to unpack .../17-libgomp1_14.2.0-19_arm64.deb ...
+#6 31.33 Unpacking libgomp1:arm64 (14.2.0-19) ...
+#6 31.36 Selecting previously unselected package libitm1:arm64.
+#6 31.36 Preparing to unpack .../18-libitm1_14.2.0-19_arm64.deb ...
+#6 31.36 Unpacking libitm1:arm64 (14.2.0-19) ...
+#6 31.38 Selecting previously unselected package libatomic1:arm64.
+#6 31.38 Preparing to unpack .../19-libatomic1_14.2.0-19_arm64.deb ...
+#6 31.39 Unpacking libatomic1:arm64 (14.2.0-19) ...
+#6 31.41 Selecting previously unselected package libasan8:arm64.
+#6 31.41 Preparing to unpack .../20-libasan8_14.2.0-19_arm64.deb ...
+#6 31.41 Unpacking libasan8:arm64 (14.2.0-19) ...
+#6 31.66 Selecting previously unselected package liblsan0:arm64.
+#6 31.66 Preparing to unpack .../21-liblsan0_14.2.0-19_arm64.deb ...
+#6 31.67 Unpacking liblsan0:arm64 (14.2.0-19) ...
+#6 31.79 Selecting previously unselected package libtsan2:arm64.
+#6 31.79 Preparing to unpack .../22-libtsan2_14.2.0-19_arm64.deb ...
+#6 31.79 Unpacking libtsan2:arm64 (14.2.0-19) ...
+#6 32.21 Selecting previously unselected package libubsan1:arm64.
+#6 32.21 Preparing to unpack .../23-libubsan1_14.2.0-19_arm64.deb ...
+#6 32.21 Unpacking libubsan1:arm64 (14.2.0-19) ...
+#6 32.32 Selecting previously unselected package libhwasan0:arm64.
+#6 32.32 Preparing to unpack .../24-libhwasan0_14.2.0-19_arm64.deb ...
+#6 32.32 Unpacking libhwasan0:arm64 (14.2.0-19) ...
+#6 32.49 Selecting previously unselected package libgcc-14-dev:arm64.
+#6 32.49 Preparing to unpack .../25-libgcc-14-dev_14.2.0-19_arm64.deb ...
+#6 32.50 Unpacking libgcc-14-dev:arm64 (14.2.0-19) ...
+#6 32.89 Selecting previously unselected package gcc-14-aarch64-linux-gnu.
+#6 32.90 Preparing to unpack .../26-gcc-14-aarch64-linux-gnu_14.2.0-19_arm64.deb ...
+#6 32.90 Unpacking gcc-14-aarch64-linux-gnu (14.2.0-19) ...
+#6 35.24 Selecting previously unselected package gcc-14.
+#6 35.25 Preparing to unpack .../27-gcc-14_14.2.0-19_arm64.deb ...
+#6 35.27 Unpacking gcc-14 (14.2.0-19) ...
+#6 35.45 Selecting previously unselected package gcc-aarch64-linux-gnu.
+#6 35.45 Preparing to unpack .../28-gcc-aarch64-linux-gnu_4%3a14.2.0-1_arm64.deb ...
+#6 35.46 Unpacking gcc-aarch64-linux-gnu (4:14.2.0-1) ...
+#6 35.58 Selecting previously unselected package gcc.
+#6 35.58 Preparing to unpack .../29-gcc_4%3a14.2.0-1_arm64.deb ...
+#6 35.59 Unpacking gcc (4:14.2.0-1) ...
+#6 35.72 Setting up binutils-common:arm64 (2.44-3) ...
+#6 35.73 Setting up libctf-nobfd0:arm64 (2.44-3) ...
+#6 35.74 Setting up libgomp1:arm64 (14.2.0-19) ...
+#6 35.75 Setting up libsframe1:arm64 (2.44-3) ...
+#6 35.76 Setting up libjansson4:arm64 (2.14-2+b3) ...
+#6 35.77 Setting up libmpfr6:arm64 (4.2.2-1) ...
+#6 35.77 Setting up libmpc3:arm64 (1.3.1-1+b3) ...
+#6 35.78 Setting up libatomic1:arm64 (14.2.0-19) ...
+#6 35.79 Setting up libubsan1:arm64 (14.2.0-19) ...
+#6 35.81 Setting up libhwasan0:arm64 (14.2.0-19) ...
+#6 35.82 Setting up libasan8:arm64 (14.2.0-19) ...
+#6 35.83 Setting up libtsan2:arm64 (14.2.0-19) ...
+#6 35.84 Setting up libbinutils:arm64 (2.44-3) ...
+#6 35.85 Setting up libisl23:arm64 (0.27-1) ...
+#6 35.86 Setting up libcc1-0:arm64 (14.2.0-19) ...
+#6 35.88 Setting up liblsan0:arm64 (14.2.0-19) ...
+#6 35.89 Setting up libitm1:arm64 (14.2.0-19) ...
+#6 35.92 Setting up libctf0:arm64 (2.44-3) ...
+#6 35.93 Setting up binutils-aarch64-linux-gnu (2.44-3) ...
+#6 35.93 Setting up libgprofng0:arm64 (2.44-3) ...
+#6 35.94 Setting up cpp-14-aarch64-linux-gnu (14.2.0-19) ...
+#6 35.95 Setting up libgcc-14-dev:arm64 (14.2.0-19) ...
+#6 35.96 Setting up binutils (2.44-3) ...
+#6 35.97 Setting up cpp-aarch64-linux-gnu (4:14.2.0-1) ...
+#6 35.98 Setting up cpp-14 (14.2.0-19) ...
+#6 35.99 Setting up cpp (4:14.2.0-1) ...
+#6 36.04 Setting up gcc-14-aarch64-linux-gnu (14.2.0-19) ...
+#6 36.05 Setting up gcc-aarch64-linux-gnu (4:14.2.0-1) ...
+#6 36.06 Setting up gcc-14 (14.2.0-19) ...
+#6 36.08 Setting up gcc (4:14.2.0-1) ...
+#6 36.19 Processing triggers for libc-bin (2.41-12+deb13u1) ...
+#6 DONE 37.2s
+
+#7 [3/8] RUN groupadd -r appgroup && useradd -r -g appgroup -d /home/appuser -m -s /sbin/nologin appuser
+#7 DONE 0.9s
+
+#8 [4/8] WORKDIR /app
+#8 DONE 0.0s
+
+#9 [5/8] COPY requirements.txt .
+#9 DONE 0.0s
+
+#10 [6/8] RUN python -m pip install --upgrade pip && pip install --no-cache-dir -r requirements.txt
+#10 2.404 Requirement already satisfied: pip in /usr/local/lib/python3.13/site-packages (25.3)
+#10 2.935 Collecting pip
+#10 3.570 Downloading pip-26.0-py3-none-any.whl.metadata (4.7 kB)
+#10 3.663 Downloading pip-26.0-py3-none-any.whl (1.8 MB)
+#10 4.116 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.8/1.8 MB 6.2 MB/s 0:00:00
+#10 4.143 Installing collected packages: pip
+#10 4.143 Attempting uninstall: pip
+#10 4.148 Found existing installation: pip 25.3
+#10 4.325 Uninstalling pip-25.3:
+#10 5.017 Successfully uninstalled pip-25.3
+#10 6.206 Successfully installed pip-26.0
+#10 6.207 WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager, possibly rendering your system unusable. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv. Use the --root-user-action option if you know what you are doing and want to suppress this warning.
+#10 7.596 Collecting Flask==3.1.0 (from -r requirements.txt (line 1))
+#10 7.863 Downloading flask-3.1.0-py3-none-any.whl.metadata (2.7 kB)
+#10 7.957 Collecting Werkzeug>=3.1 (from Flask==3.1.0->-r requirements.txt (line 1))
+#10 8.021 Downloading werkzeug-3.1.5-py3-none-any.whl.metadata (4.0 kB)
+#10 8.101 Collecting Jinja2>=3.1.2 (from Flask==3.1.0->-r requirements.txt (line 1))
+#10 8.169 Downloading jinja2-3.1.6-py3-none-any.whl.metadata (2.9 kB)
+#10 8.244 Collecting itsdangerous>=2.2 (from Flask==3.1.0->-r requirements.txt (line 1))
+#10 8.308 Downloading itsdangerous-2.2.0-py3-none-any.whl.metadata (1.9 kB)
+#10 8.393 Collecting click>=8.1.3 (from Flask==3.1.0->-r requirements.txt (line 1))
+#10 8.452 Downloading click-8.3.1-py3-none-any.whl.metadata (2.6 kB)
+#10 8.517 Collecting blinker>=1.9 (from Flask==3.1.0->-r requirements.txt (line 1))
+#10 8.586 Downloading blinker-1.9.0-py3-none-any.whl.metadata (1.6 kB)
+#10 8.738 Collecting MarkupSafe>=2.0 (from Jinja2>=3.1.2->Flask==3.1.0->-r requirements.txt (line 1))
+#10 8.814 Downloading markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl.metadata (2.7 kB)
+#10 8.884 Downloading flask-3.1.0-py3-none-any.whl (102 kB)
+#10 9.027 Downloading blinker-1.9.0-py3-none-any.whl (8.5 kB)
+#10 9.091 Downloading click-8.3.1-py3-none-any.whl (108 kB)
+#10 9.172 Downloading itsdangerous-2.2.0-py3-none-any.whl (16 kB)
+#10 9.237 Downloading jinja2-3.1.6-py3-none-any.whl (134 kB)
+#10 9.321 Downloading markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl (24 kB)
+#10 9.391 Downloading werkzeug-3.1.5-py3-none-any.whl (225 kB)
+#10 9.445 Installing collected packages: MarkupSafe, itsdangerous, click, blinker, Werkzeug, Jinja2, Flask
+#10 9.907
+#10 9.910 Successfully installed Flask-3.1.0 Jinja2-3.1.6 MarkupSafe-3.0.3 Werkzeug-3.1.5 blinker-1.9.0 click-8.3.1 itsdangerous-2.2.0
+#10 9.910 WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager, possibly rendering your system unusable. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv. Use the --root-user-action option if you know what you are doing and want to suppress this warning.
+#10 DONE 10.4s
+
+#11 [7/8] COPY app.py .
+#11 DONE 0.0s
+
+#12 [8/8] RUN chown -R appuser:appgroup /app
+#12 DONE 0.2s
+
+#13 exporting to image
+#13 exporting layers
+#13 exporting layers 7.3s done
+#13 exporting manifest sha256:21510e1ea3021e5b4b871880b88467040fecda34575b62215d2630d96ea9df55 done
+#13 exporting config sha256:ae057c1a0b4faee60ec0d57053519e7d81aba7a32b4d66fe0cab58a5685a8a75 done
+#13 exporting attestation manifest sha256:261ce7665a093eebbd77c9bd681f591e7320e2d11e1277d4c0d1aa66fc026584 0.0s done
+#13 exporting manifest list sha256:0bdb7eed5b1a2d0c94182973a6883de99afdb9efbe26b1156d7c8d17b5469845 done
+#13 naming to docker.io/library/devops-info-service:lab02 done
+#13 unpacking to docker.io/library/devops-info-service:lab02
+#13 unpacking to docker.io/library/devops-info-service:lab02 1.6s done
+#13 DONE 9.0s
+
+View build details: docker-desktop://dashboard/build/desktop-linux/desktop-linux/10yr646xr8e4bz24kpr8ynrq5
diff --git a/app_python/docs/docker_hub_url.txt b/app_python/docs/docker_hub_url.txt
new file mode 100644
index 0000000000..16f82597b7
--- /dev/null
+++ b/app_python/docs/docker_hub_url.txt
@@ -0,0 +1 @@
+https://hub.docker.com/r/versceana/devops-info-service
diff --git a/app_python/docs/image_info.txt b/app_python/docs/image_info.txt
new file mode 100644
index 0000000000..b832dda900
--- /dev/null
+++ b/app_python/docs/image_info.txt
@@ -0,0 +1,2 @@
+REPOSITORY TAG SIZE
+devops-info-service lab02 457MB
diff --git a/app_python/docs/lab02_curl_health.json b/app_python/docs/lab02_curl_health.json
new file mode 100644
index 0000000000..eeb74562ca
--- /dev/null
+++ b/app_python/docs/lab02_curl_health.json
@@ -0,0 +1,5 @@
+{
+ "status": "healthy",
+ "timestamp": "2026-02-04T21:01:34.750688+00:00",
+ "uptime_seconds": 24
+}
diff --git a/app_python/docs/lab02_curl_main.json b/app_python/docs/lab02_curl_main.json
new file mode 100644
index 0000000000..0071699bbd
--- /dev/null
+++ b/app_python/docs/lab02_curl_main.json
@@ -0,0 +1,40 @@
+{
+ "endpoints": [
+ {
+ "description": "Service information",
+ "method": "GET",
+ "path": "/"
+ },
+ {
+ "description": "Health check",
+ "method": "GET",
+ "path": "/health"
+ }
+ ],
+ "request": {
+ "client_ip": "192.168.65.1",
+ "method": "GET",
+ "path": "/",
+ "user_agent": "curl/8.7.1"
+ },
+ "runtime": {
+ "current_time": "2026-02-04T21:01:22.363918+00:00",
+ "timezone": "UTC",
+ "uptime_human": "0 hours, 0 minutes",
+ "uptime_seconds": 11
+ },
+ "service": {
+ "description": "DevOps course info service",
+ "framework": "Flask",
+ "name": "devops-info-service",
+ "version": "1.0.0"
+ },
+ "system": {
+ "architecture": "aarch64",
+ "cpu_count": 8,
+ "hostname": "92865388df9f",
+ "platform": "Linux",
+ "platform_version": "6.10.14-linuxkit",
+ "python_version": "3.13.11"
+ }
+}
diff --git a/app_python/docs/push_output.txt b/app_python/docs/push_output.txt
new file mode 100644
index 0000000000..ec1ed7b854
--- /dev/null
+++ b/app_python/docs/push_output.txt
@@ -0,0 +1,288 @@
+The push refers to repository [docker.io/versceana/devops-info-service]
+e4d6198a1cf3: Waiting
+557e876784bf: Waiting
+0a9eaad638a4: Waiting
+3ea009573b47: Waiting
+d878836394b3: Waiting
+e0486f87d018: Waiting
+a6866fe8c3d2: Waiting
+97fc85b49690: Waiting
+a442129b4195: Waiting
+fe9a90620d58: Waiting
+88747c24f5a3: Waiting
+8a73290aa36d: Waiting
+d878836394b3: Waiting
+e0486f87d018: Waiting
+a6866fe8c3d2: Waiting
+97fc85b49690: Waiting
+a442129b4195: Waiting
+fe9a90620d58: Waiting
+88747c24f5a3: Waiting
+8a73290aa36d: Waiting
+e4d6198a1cf3: Waiting
+557e876784bf: Waiting
+0a9eaad638a4: Waiting
+3ea009573b47: Waiting
+fe9a90620d58: Waiting
+88747c24f5a3: Waiting
+8a73290aa36d: Waiting
+e4d6198a1cf3: Waiting
+557e876784bf: Waiting
+0a9eaad638a4: Waiting
+3ea009573b47: Waiting
+d878836394b3: Waiting
+e0486f87d018: Waiting
+a6866fe8c3d2: Waiting
+97fc85b49690: Waiting
+a442129b4195: Waiting
+e4d6198a1cf3: Waiting
+557e876784bf: Waiting
+0a9eaad638a4: Waiting
+3ea009573b47: Waiting
+d878836394b3: Waiting
+e0486f87d018: Waiting
+a6866fe8c3d2: Waiting
+97fc85b49690: Waiting
+a442129b4195: Waiting
+fe9a90620d58: Waiting
+88747c24f5a3: Waiting
+8a73290aa36d: Waiting
+e0486f87d018: Waiting
+a6866fe8c3d2: Waiting
+97fc85b49690: Waiting
+a442129b4195: Waiting
+fe9a90620d58: Waiting
+88747c24f5a3: Waiting
+8a73290aa36d: Waiting
+e4d6198a1cf3: Waiting
+557e876784bf: Waiting
+0a9eaad638a4: Waiting
+3ea009573b47: Waiting
+d878836394b3: Waiting
+fe9a90620d58: Waiting
+88747c24f5a3: Waiting
+8a73290aa36d: Waiting
+e4d6198a1cf3: Waiting
+557e876784bf: Waiting
+0a9eaad638a4: Waiting
+3ea009573b47: Waiting
+d878836394b3: Waiting
+e0486f87d018: Waiting
+a6866fe8c3d2: Waiting
+97fc85b49690: Waiting
+a442129b4195: Waiting
+e4d6198a1cf3: Waiting
+557e876784bf: Waiting
+0a9eaad638a4: Waiting
+3ea009573b47: Waiting
+d878836394b3: Waiting
+e0486f87d018: Waiting
+a6866fe8c3d2: Waiting
+97fc85b49690: Waiting
+a442129b4195: Waiting
+fe9a90620d58: Waiting
+88747c24f5a3: Waiting
+8a73290aa36d: Waiting
+e4d6198a1cf3: Waiting
+557e876784bf: Waiting
+0a9eaad638a4: Waiting
+3ea009573b47: Waiting
+d878836394b3: Waiting
+e0486f87d018: Waiting
+a6866fe8c3d2: Waiting
+97fc85b49690: Waiting
+a442129b4195: Waiting
+fe9a90620d58: Waiting
+88747c24f5a3: Waiting
+8a73290aa36d: Waiting
+8a73290aa36d: Waiting
+e4d6198a1cf3: Waiting
+557e876784bf: Waiting
+0a9eaad638a4: Waiting
+3ea009573b47: Waiting
+d878836394b3: Waiting
+e0486f87d018: Waiting
+a6866fe8c3d2: Waiting
+97fc85b49690: Waiting
+a442129b4195: Waiting
+fe9a90620d58: Waiting
+88747c24f5a3: Waiting
+0a9eaad638a4: Waiting
+3ea009573b47: Waiting
+d878836394b3: Waiting
+e0486f87d018: Waiting
+a6866fe8c3d2: Waiting
+97fc85b49690: Waiting
+a442129b4195: Waiting
+fe9a90620d58: Waiting
+88747c24f5a3: Waiting
+8a73290aa36d: Waiting
+e4d6198a1cf3: Waiting
+557e876784bf: Waiting
+97fc85b49690: Waiting
+a442129b4195: Waiting
+fe9a90620d58: Waiting
+88747c24f5a3: Waiting
+8a73290aa36d: Waiting
+e4d6198a1cf3: Waiting
+557e876784bf: Waiting
+0a9eaad638a4: Waiting
+3ea009573b47: Waiting
+d878836394b3: Waiting
+e0486f87d018: Waiting
+a6866fe8c3d2: Waiting
+e4d6198a1cf3: Waiting
+557e876784bf: Waiting
+0a9eaad638a4: Waiting
+3ea009573b47: Waiting
+d878836394b3: Waiting
+e0486f87d018: Waiting
+a6866fe8c3d2: Waiting
+97fc85b49690: Waiting
+a442129b4195: Waiting
+fe9a90620d58: Waiting
+88747c24f5a3: Waiting
+8a73290aa36d: Waiting
+97fc85b49690: Waiting
+a442129b4195: Waiting
+fe9a90620d58: Waiting
+88747c24f5a3: Waiting
+8a73290aa36d: Waiting
+e4d6198a1cf3: Waiting
+557e876784bf: Waiting
+0a9eaad638a4: Waiting
+3ea009573b47: Waiting
+d878836394b3: Waiting
+e0486f87d018: Waiting
+a6866fe8c3d2: Waiting
+fe9a90620d58: Waiting
+88747c24f5a3: Waiting
+8a73290aa36d: Waiting
+e4d6198a1cf3: Waiting
+557e876784bf: Waiting
+0a9eaad638a4: Waiting
+3ea009573b47: Waiting
+d878836394b3: Waiting
+e0486f87d018: Waiting
+a6866fe8c3d2: Waiting
+97fc85b49690: Waiting
+a442129b4195: Waiting
+557e876784bf: Waiting
+0a9eaad638a4: Waiting
+3ea009573b47: Waiting
+d878836394b3: Waiting
+e0486f87d018: Waiting
+a6866fe8c3d2: Waiting
+97fc85b49690: Waiting
+a442129b4195: Waiting
+fe9a90620d58: Waiting
+88747c24f5a3: Waiting
+8a73290aa36d: Waiting
+e4d6198a1cf3: Waiting
+8a73290aa36d: Waiting
+e4d6198a1cf3: Waiting
+557e876784bf: Waiting
+0a9eaad638a4: Waiting
+3ea009573b47: Waiting
+d878836394b3: Waiting
+e0486f87d018: Waiting
+a6866fe8c3d2: Waiting
+97fc85b49690: Waiting
+a442129b4195: Waiting
+fe9a90620d58: Waiting
+88747c24f5a3: Waiting
+e4d6198a1cf3: Waiting
+557e876784bf: Waiting
+0a9eaad638a4: Waiting
+3ea009573b47: Waiting
+d878836394b3: Waiting
+e0486f87d018: Waiting
+a6866fe8c3d2: Waiting
+97fc85b49690: Waiting
+a442129b4195: Waiting
+fe9a90620d58: Waiting
+88747c24f5a3: Waiting
+8a73290aa36d: Waiting
+557e876784bf: Waiting
+0a9eaad638a4: Waiting
+3ea009573b47: Waiting
+d878836394b3: Waiting
+e0486f87d018: Waiting
+a6866fe8c3d2: Waiting
+97fc85b49690: Waiting
+a442129b4195: Waiting
+fe9a90620d58: Waiting
+88747c24f5a3: Waiting
+8a73290aa36d: Waiting
+e4d6198a1cf3: Waiting
+8a73290aa36d: Waiting
+e4d6198a1cf3: Waiting
+557e876784bf: Waiting
+0a9eaad638a4: Waiting
+3ea009573b47: Waiting
+d878836394b3: Waiting
+e0486f87d018: Waiting
+a6866fe8c3d2: Waiting
+97fc85b49690: Waiting
+a442129b4195: Waiting
+fe9a90620d58: Waiting
+88747c24f5a3: Waiting
+d878836394b3: Waiting
+e0486f87d018: Waiting
+a6866fe8c3d2: Waiting
+97fc85b49690: Waiting
+a442129b4195: Waiting
+fe9a90620d58: Waiting
+88747c24f5a3: Waiting
+8a73290aa36d: Waiting
+e4d6198a1cf3: Waiting
+557e876784bf: Waiting
+0a9eaad638a4: Waiting
+3ea009573b47: Waiting
+d878836394b3: Waiting
+e0486f87d018: Waiting
+a6866fe8c3d2: Waiting
+97fc85b49690: Waiting
+a442129b4195: Waiting
+fe9a90620d58: Waiting
+88747c24f5a3: Waiting
+8a73290aa36d: Waiting
+e4d6198a1cf3: Waiting
+557e876784bf: Waiting
+0a9eaad638a4: Waiting
+3ea009573b47: Waiting
+0a9eaad638a4: Waiting
+3ea009573b47: Waiting
+d878836394b3: Waiting
+e0486f87d018: Waiting
+a6866fe8c3d2: Waiting
+97fc85b49690: Waiting
+a442129b4195: Waiting
+fe9a90620d58: Waiting
+88747c24f5a3: Waiting
+8a73290aa36d: Waiting
+e4d6198a1cf3: Waiting
+557e876784bf: Waiting
+88747c24f5a3: Waiting
+e4d6198a1cf3: Waiting
+557e876784bf: Waiting
+0a9eaad638a4: Waiting
+d878836394b3: Waiting
+e0486f87d018: Waiting
+e4d6198a1cf3: Waiting
+557e876784bf: Waiting
+0a9eaad638a4: Waiting
+d878836394b3: Waiting
+97fc85b49690: Pushed
+8a73290aa36d: Pushed
+e0486f87d018: Pushed
+88747c24f5a3: Pushed
+e4d6198a1cf3: Pushed
+0a9eaad638a4: Pushed
+d878836394b3: Pushed
+fe9a90620d58: Pushed
+a442129b4195: Pushed
+a6866fe8c3d2: Pushed
+3ea009573b47: Pushed
+557e876784bf: Pushed
+lab02: digest: sha256:0bdb7eed5b1a2d0c94182973a6883de99afdb9efbe26b1156d7c8d17b5469845 size: 856
diff --git a/app_python/docs/screenshots/01-startup.png b/app_python/docs/screenshots/01-startup.png
new file mode 100644
index 0000000000..abbf4e9388
Binary files /dev/null and b/app_python/docs/screenshots/01-startup.png differ
diff --git a/app_python/docs/screenshots/02-main-endpoint.png b/app_python/docs/screenshots/02-main-endpoint.png
new file mode 100644
index 0000000000..220d7e9846
Binary files /dev/null and b/app_python/docs/screenshots/02-main-endpoint.png differ
diff --git a/app_python/docs/screenshots/03-health-check.png b/app_python/docs/screenshots/03-health-check.png
new file mode 100644
index 0000000000..a328ee0f36
Binary files /dev/null and b/app_python/docs/screenshots/03-health-check.png differ
diff --git a/app_python/docs/screenshots/SSH-palumi-VM.png b/app_python/docs/screenshots/SSH-palumi-VM.png
new file mode 100644
index 0000000000..89d25d5bf2
Binary files /dev/null and b/app_python/docs/screenshots/SSH-palumi-VM.png differ
diff --git a/app_python/docs/screenshots/SSH-terraform-VM.png b/app_python/docs/screenshots/SSH-terraform-VM.png
new file mode 100644
index 0000000000..8470a07fe3
Binary files /dev/null and b/app_python/docs/screenshots/SSH-terraform-VM.png differ
diff --git a/app_python/docs/screenshots/agrocd-python-app.png b/app_python/docs/screenshots/agrocd-python-app.png
new file mode 100644
index 0000000000..ff7e38b2fb
Binary files /dev/null and b/app_python/docs/screenshots/agrocd-python-app.png differ
diff --git a/app_python/docs/screenshots/agrocd-replicas.png b/app_python/docs/screenshots/agrocd-replicas.png
new file mode 100644
index 0000000000..fd9ef98527
Binary files /dev/null and b/app_python/docs/screenshots/agrocd-replicas.png differ
diff --git a/app_python/docs/screenshots/alertmanager.png b/app_python/docs/screenshots/alertmanager.png
new file mode 100644
index 0000000000..2dcffdaa02
Binary files /dev/null and b/app_python/docs/screenshots/alertmanager.png differ
diff --git a/app_python/docs/screenshots/app-health.png b/app_python/docs/screenshots/app-health.png
new file mode 100644
index 0000000000..0d16aaf3de
Binary files /dev/null and b/app_python/docs/screenshots/app-health.png differ
diff --git a/app_python/docs/screenshots/argocd-app-list.png b/app_python/docs/screenshots/argocd-app-list.png
new file mode 100644
index 0000000000..9b3edbb82b
Binary files /dev/null and b/app_python/docs/screenshots/argocd-app-list.png differ
diff --git a/app_python/docs/screenshots/argocd-app-sync.png b/app_python/docs/screenshots/argocd-app-sync.png
new file mode 100644
index 0000000000..ef363863d4
Binary files /dev/null and b/app_python/docs/screenshots/argocd-app-sync.png differ
diff --git a/app_python/docs/screenshots/argocd-pods.png b/app_python/docs/screenshots/argocd-pods.png
new file mode 100644
index 0000000000..4c07f94bce
Binary files /dev/null and b/app_python/docs/screenshots/argocd-pods.png differ
diff --git a/app_python/docs/screenshots/argocd-rollouts-pods.png b/app_python/docs/screenshots/argocd-rollouts-pods.png
new file mode 100644
index 0000000000..4dff221819
Binary files /dev/null and b/app_python/docs/screenshots/argocd-rollouts-pods.png differ
diff --git a/app_python/docs/screenshots/argocd-version.png b/app_python/docs/screenshots/argocd-version.png
new file mode 100644
index 0000000000..6e7c08a669
Binary files /dev/null and b/app_python/docs/screenshots/argocd-version.png differ
diff --git a/app_python/docs/screenshots/bluegreen-healthy.png b/app_python/docs/screenshots/bluegreen-healthy.png
new file mode 100644
index 0000000000..fb5de88eaf
Binary files /dev/null and b/app_python/docs/screenshots/bluegreen-healthy.png differ
diff --git a/app_python/docs/screenshots/bluegreen-preview-nginx.png b/app_python/docs/screenshots/bluegreen-preview-nginx.png
new file mode 100644
index 0000000000..ed81e5f19c
Binary files /dev/null and b/app_python/docs/screenshots/bluegreen-preview-nginx.png differ
diff --git a/app_python/docs/screenshots/canary-finish.png b/app_python/docs/screenshots/canary-finish.png
new file mode 100644
index 0000000000..799b14b81d
Binary files /dev/null and b/app_python/docs/screenshots/canary-finish.png differ
diff --git a/app_python/docs/screenshots/canary-healthy.png b/app_python/docs/screenshots/canary-healthy.png
new file mode 100644
index 0000000000..9f7ecdca98
Binary files /dev/null and b/app_python/docs/screenshots/canary-healthy.png differ
diff --git a/app_python/docs/screenshots/canary-nginx.png b/app_python/docs/screenshots/canary-nginx.png
new file mode 100644
index 0000000000..5da194a33b
Binary files /dev/null and b/app_python/docs/screenshots/canary-nginx.png differ
diff --git a/app_python/docs/screenshots/config-env.png b/app_python/docs/screenshots/config-env.png
new file mode 100644
index 0000000000..284f779da0
Binary files /dev/null and b/app_python/docs/screenshots/config-env.png differ
diff --git a/app_python/docs/screenshots/counter-persistence.png b/app_python/docs/screenshots/counter-persistence.png
new file mode 100644
index 0000000000..0fef3d5bb5
Binary files /dev/null and b/app_python/docs/screenshots/counter-persistence.png differ
diff --git a/app_python/docs/screenshots/dashboard.png b/app_python/docs/screenshots/dashboard.png
new file mode 100644
index 0000000000..8a5abfbca4
Binary files /dev/null and b/app_python/docs/screenshots/dashboard.png differ
diff --git a/app_python/docs/screenshots/deployment-pods.png b/app_python/docs/screenshots/deployment-pods.png
new file mode 100644
index 0000000000..82fa20bb17
Binary files /dev/null and b/app_python/docs/screenshots/deployment-pods.png differ
diff --git a/app_python/docs/screenshots/dev-pods.png b/app_python/docs/screenshots/dev-pods.png
new file mode 100644
index 0000000000..5e768a8cb5
Binary files /dev/null and b/app_python/docs/screenshots/dev-pods.png differ
diff --git a/app_python/docs/screenshots/different-visits.png b/app_python/docs/screenshots/different-visits.png
new file mode 100644
index 0000000000..7edafbe392
Binary files /dev/null and b/app_python/docs/screenshots/different-visits.png differ
diff --git a/app_python/docs/screenshots/docker-compose-ps.png b/app_python/docs/screenshots/docker-compose-ps.png
new file mode 100644
index 0000000000..8b7a0bb2a0
Binary files /dev/null and b/app_python/docs/screenshots/docker-compose-ps.png differ
diff --git a/app_python/docs/screenshots/grafana-datasource.png b/app_python/docs/screenshots/grafana-datasource.png
new file mode 100644
index 0000000000..6e9ba6c2d7
Binary files /dev/null and b/app_python/docs/screenshots/grafana-datasource.png differ
diff --git a/app_python/docs/screenshots/grafana.png b/app_python/docs/screenshots/grafana.png
new file mode 100644
index 0000000000..bd377dfa8a
Binary files /dev/null and b/app_python/docs/screenshots/grafana.png differ
diff --git a/app_python/docs/screenshots/headless-service.png b/app_python/docs/screenshots/headless-service.png
new file mode 100644
index 0000000000..61520ffbfc
Binary files /dev/null and b/app_python/docs/screenshots/headless-service.png differ
diff --git a/app_python/docs/screenshots/helm-list.png b/app_python/docs/screenshots/helm-list.png
new file mode 100644
index 0000000000..3ba317fdfe
Binary files /dev/null and b/app_python/docs/screenshots/helm-list.png differ
diff --git a/app_python/docs/screenshots/helm-template-hooks.png b/app_python/docs/screenshots/helm-template-hooks.png
new file mode 100644
index 0000000000..83a1d8b54a
Binary files /dev/null and b/app_python/docs/screenshots/helm-template-hooks.png differ
diff --git a/app_python/docs/screenshots/helm-version.png b/app_python/docs/screenshots/helm-version.png
new file mode 100644
index 0000000000..79bcc03340
Binary files /dev/null and b/app_python/docs/screenshots/helm-version.png differ
diff --git a/app_python/docs/screenshots/json-logs-explore.png b/app_python/docs/screenshots/json-logs-explore.png
new file mode 100644
index 0000000000..1bc9ac676e
Binary files /dev/null and b/app_python/docs/screenshots/json-logs-explore.png differ
diff --git a/app_python/docs/screenshots/k8s-secret-yaml.png b/app_python/docs/screenshots/k8s-secret-yaml.png
new file mode 100644
index 0000000000..e8d39e5fec
Binary files /dev/null and b/app_python/docs/screenshots/k8s-secret-yaml.png differ
diff --git a/app_python/docs/screenshots/kube-prometheus-stack.png b/app_python/docs/screenshots/kube-prometheus-stack.png
new file mode 100644
index 0000000000..b9f0138eb3
Binary files /dev/null and b/app_python/docs/screenshots/kube-prometheus-stack.png differ
diff --git a/app_python/docs/screenshots/lab6-1.3.png b/app_python/docs/screenshots/lab6-1.3.png
new file mode 100644
index 0000000000..31319ccbdc
Binary files /dev/null and b/app_python/docs/screenshots/lab6-1.3.png differ
diff --git a/app_python/docs/screenshots/lab6-2.4-first-deploy.png b/app_python/docs/screenshots/lab6-2.4-first-deploy.png
new file mode 100644
index 0000000000..e766aa879c
Binary files /dev/null and b/app_python/docs/screenshots/lab6-2.4-first-deploy.png differ
diff --git a/app_python/docs/screenshots/lab6-2.4-second-deploy.png b/app_python/docs/screenshots/lab6-2.4-second-deploy.png
new file mode 100644
index 0000000000..9e5b7f159b
Binary files /dev/null and b/app_python/docs/screenshots/lab6-2.4-second-deploy.png differ
diff --git a/app_python/docs/screenshots/lab6-3-1st-scenario.png b/app_python/docs/screenshots/lab6-3-1st-scenario.png
new file mode 100644
index 0000000000..2ff6c5d08b
Binary files /dev/null and b/app_python/docs/screenshots/lab6-3-1st-scenario.png differ
diff --git a/app_python/docs/screenshots/lab6-3-2nd-scenario.png b/app_python/docs/screenshots/lab6-3-2nd-scenario.png
new file mode 100644
index 0000000000..8091b73628
Binary files /dev/null and b/app_python/docs/screenshots/lab6-3-2nd-scenario.png differ
diff --git a/app_python/docs/screenshots/lab6-3-3rd-scenario.png b/app_python/docs/screenshots/lab6-3-3rd-scenario.png
new file mode 100644
index 0000000000..6e83ad9326
Binary files /dev/null and b/app_python/docs/screenshots/lab6-3-3rd-scenario.png differ
diff --git a/app_python/docs/screenshots/lab6-3-4th-scenario.png b/app_python/docs/screenshots/lab6-3-4th-scenario.png
new file mode 100644
index 0000000000..b317221fde
Binary files /dev/null and b/app_python/docs/screenshots/lab6-3-4th-scenario.png differ
diff --git a/app_python/docs/screenshots/local-dev-test.png b/app_python/docs/screenshots/local-dev-test.png
new file mode 100644
index 0000000000..2827d63ddf
Binary files /dev/null and b/app_python/docs/screenshots/local-dev-test.png differ
diff --git a/app_python/docs/screenshots/local-logs-dev.png b/app_python/docs/screenshots/local-logs-dev.png
new file mode 100644
index 0000000000..d015c33390
Binary files /dev/null and b/app_python/docs/screenshots/local-logs-dev.png differ
diff --git a/app_python/docs/screenshots/local-visits.png b/app_python/docs/screenshots/local-visits.png
new file mode 100644
index 0000000000..51b5385ee5
Binary files /dev/null and b/app_python/docs/screenshots/local-visits.png differ
diff --git a/app_python/docs/screenshots/metrics-dashboard.png b/app_python/docs/screenshots/metrics-dashboard.png
new file mode 100644
index 0000000000..94c5fcd19a
Binary files /dev/null and b/app_python/docs/screenshots/metrics-dashboard.png differ
diff --git a/app_python/docs/screenshots/palumi-preview.png b/app_python/docs/screenshots/palumi-preview.png
new file mode 100644
index 0000000000..15286b9f35
Binary files /dev/null and b/app_python/docs/screenshots/palumi-preview.png differ
diff --git a/app_python/docs/screenshots/palumi-up.png b/app_python/docs/screenshots/palumi-up.png
new file mode 100644
index 0000000000..59d4c69430
Binary files /dev/null and b/app_python/docs/screenshots/palumi-up.png differ
diff --git a/app_python/docs/screenshots/persistence-test.png b/app_python/docs/screenshots/persistence-test.png
new file mode 100644
index 0000000000..2ffc2126c7
Binary files /dev/null and b/app_python/docs/screenshots/persistence-test.png differ
diff --git a/app_python/docs/screenshots/prod-pods.png b/app_python/docs/screenshots/prod-pods.png
new file mode 100644
index 0000000000..04b8695432
Binary files /dev/null and b/app_python/docs/screenshots/prod-pods.png differ
diff --git a/app_python/docs/screenshots/prometheus-targets.png b/app_python/docs/screenshots/prometheus-targets.png
new file mode 100644
index 0000000000..f5f5fd54c9
Binary files /dev/null and b/app_python/docs/screenshots/prometheus-targets.png differ
diff --git a/app_python/docs/screenshots/public-endpoint.png b/app_python/docs/screenshots/public-endpoint.png
new file mode 100644
index 0000000000..81c650117d
Binary files /dev/null and b/app_python/docs/screenshots/public-endpoint.png differ
diff --git a/app_python/docs/screenshots/rollback-execution.png b/app_python/docs/screenshots/rollback-execution.png
new file mode 100644
index 0000000000..f9b22b38f6
Binary files /dev/null and b/app_python/docs/screenshots/rollback-execution.png differ
diff --git a/app_python/docs/screenshots/rollout-history.png b/app_python/docs/screenshots/rollout-history.png
new file mode 100644
index 0000000000..aa35fb78f2
Binary files /dev/null and b/app_python/docs/screenshots/rollout-history.png differ
diff --git a/app_python/docs/screenshots/rollouts-dashboard.png b/app_python/docs/screenshots/rollouts-dashboard.png
new file mode 100644
index 0000000000..ff139ef832
Binary files /dev/null and b/app_python/docs/screenshots/rollouts-dashboard.png differ
diff --git a/app_python/docs/screenshots/service-access.png b/app_python/docs/screenshots/service-access.png
new file mode 100644
index 0000000000..77d300f3f8
Binary files /dev/null and b/app_python/docs/screenshots/service-access.png differ
diff --git a/app_python/docs/screenshots/statefulset-pods-pvc.png b/app_python/docs/screenshots/statefulset-pods-pvc.png
new file mode 100644
index 0000000000..bc58c5117e
Binary files /dev/null and b/app_python/docs/screenshots/statefulset-pods-pvc.png differ
diff --git a/app_python/docs/screenshots/terraform-destroy.png b/app_python/docs/screenshots/terraform-destroy.png
new file mode 100644
index 0000000000..3a92d04ffa
Binary files /dev/null and b/app_python/docs/screenshots/terraform-destroy.png differ
diff --git a/app_python/docs/screenshots/update-strategy-partition.png b/app_python/docs/screenshots/update-strategy-partition.png
new file mode 100644
index 0000000000..7f809de1b9
Binary files /dev/null and b/app_python/docs/screenshots/update-strategy-partition.png differ
diff --git a/app_python/docs/screenshots/vault-logs-kube.png b/app_python/docs/screenshots/vault-logs-kube.png
new file mode 100644
index 0000000000..82f92e5ba3
Binary files /dev/null and b/app_python/docs/screenshots/vault-logs-kube.png differ
diff --git a/app_python/docs/screenshots/vault-pods-kube.png b/app_python/docs/screenshots/vault-pods-kube.png
new file mode 100644
index 0000000000..970086715c
Binary files /dev/null and b/app_python/docs/screenshots/vault-pods-kube.png differ
diff --git a/app_python/docs/screenshots/vault-pods.png b/app_python/docs/screenshots/vault-pods.png
new file mode 100644
index 0000000000..781cbc6928
Binary files /dev/null and b/app_python/docs/screenshots/vault-pods.png differ
diff --git a/app_python/docs/screenshots/vault.png b/app_python/docs/screenshots/vault.png
new file mode 100644
index 0000000000..7fc99707f3
Binary files /dev/null and b/app_python/docs/screenshots/vault.png differ
diff --git a/app_python/docs/screenshots/wrangler-secrets.png b/app_python/docs/screenshots/wrangler-secrets.png
new file mode 100644
index 0000000000..5d536228cd
Binary files /dev/null and b/app_python/docs/screenshots/wrangler-secrets.png differ
diff --git a/app_python/docs/screenshots/wrangler-tail.png b/app_python/docs/screenshots/wrangler-tail.png
new file mode 100644
index 0000000000..cdf1d5c237
Binary files /dev/null and b/app_python/docs/screenshots/wrangler-tail.png differ
diff --git a/app_python/docs/screenshots/wrangler-whoami.png b/app_python/docs/screenshots/wrangler-whoami.png
new file mode 100644
index 0000000000..11ff2a3405
Binary files /dev/null and b/app_python/docs/screenshots/wrangler-whoami.png differ
diff --git a/app_python/edge-api/.gitignore b/app_python/edge-api/.gitignore
new file mode 100644
index 0000000000..dc4dcbb726
--- /dev/null
+++ b/app_python/edge-api/.gitignore
@@ -0,0 +1,10 @@
+node_modules/
+dist/
+build/
+.wrangler/
+.env.local
+.env.*.local
+*.log
+.DS_Store
+.cache/
+.turbo
diff --git a/app_python/edge-api/package-lock.json b/app_python/edge-api/package-lock.json
new file mode 100644
index 0000000000..8b880be504
--- /dev/null
+++ b/app_python/edge-api/package-lock.json
@@ -0,0 +1,1513 @@
+{
+ "name": "edge-api",
+ "version": "1.0.0",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "edge-api",
+ "version": "1.0.0",
+ "license": "MIT",
+ "devDependencies": {
+ "@cloudflare/workers-types": "^4.20240815.1",
+ "wrangler": "^4.90.1"
+ }
+ },
+ "node_modules/@cloudflare/kv-asset-handler": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/@cloudflare/kv-asset-handler/-/kv-asset-handler-0.5.0.tgz",
+ "integrity": "sha512-jxQYkj8dSIzc0cD6cMMNdOc1UVjqSqu8BZdor5s8cGjW2I8BjODt/kWPVdY+u9zj3ms75Q5qaZgnxUad83+eAg==",
+ "dev": true,
+ "license": "MIT OR Apache-2.0",
+ "engines": {
+ "node": ">=22.0.0"
+ }
+ },
+ "node_modules/@cloudflare/unenv-preset": {
+ "version": "2.16.1",
+ "resolved": "https://registry.npmjs.org/@cloudflare/unenv-preset/-/unenv-preset-2.16.1.tgz",
+ "integrity": "sha512-ECxObrMfyTl5bhQf/lZCXwo5G6xX9IAUo+nDMKK4SZ8m4Jvvxp52vilxyySSWh2YTZz8+HQ07qGH/2rEom1vDw==",
+ "dev": true,
+ "license": "MIT OR Apache-2.0",
+ "peerDependencies": {
+ "unenv": "2.0.0-rc.24",
+ "workerd": ">1.20260305.0 <2.0.0-0"
+ },
+ "peerDependenciesMeta": {
+ "workerd": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@cloudflare/workerd-darwin-64": {
+ "version": "1.20260508.1",
+ "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-64/-/workerd-darwin-64-1.20260508.1.tgz",
+ "integrity": "sha512-IT3r6VgiSwIesL4AJbxjgxvIxwWZqM7BKkhYAzOKHl4GF2M0TxeOahUIXd+CYXVZgHX8ceEg+MXbEehPelJyNg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=16"
+ }
+ },
+ "node_modules/@cloudflare/workerd-darwin-arm64": {
+ "version": "1.20260508.1",
+ "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-arm64/-/workerd-darwin-arm64-1.20260508.1.tgz",
+ "integrity": "sha512-JTVsisOJPcNKw0qovPjqyBWYahfdhUh7/9NICiG5wxaEQ45PYKdoqNq0hOAAIqvqoxsKZBvTgcPTJREPqk7avA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=16"
+ }
+ },
+ "node_modules/@cloudflare/workerd-linux-64": {
+ "version": "1.20260508.1",
+ "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-64/-/workerd-linux-64-1.20260508.1.tgz",
+ "integrity": "sha512-zO38pCc27YlsZiPYcaZnosy0/t7abXrRU3VEO1oKfUvnaCpHgphDG+VsrmHL+kntda6hrtNwg2jLeMAqqIjnjw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=16"
+ }
+ },
+ "node_modules/@cloudflare/workerd-linux-arm64": {
+ "version": "1.20260508.1",
+ "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-arm64/-/workerd-linux-arm64-1.20260508.1.tgz",
+ "integrity": "sha512-XhJa780Ia6MNIrtxn/ruZHS79b9pu5EKPfRNReaUqxy8erPT2fs93axMfFoS9kIkcaRRj/1TOUKcTeAMoywY7w==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=16"
+ }
+ },
+ "node_modules/@cloudflare/workerd-windows-64": {
+ "version": "1.20260508.1",
+ "resolved": "https://registry.npmjs.org/@cloudflare/workerd-windows-64/-/workerd-windows-64-1.20260508.1.tgz",
+ "integrity": "sha512-QdDOK3B/Ul1s3QmIwDrFyx9230to6LsNmWcVR8w+TYjNZuRPzqQBgusp78LO7MlqCoEl9dvIcN00jkJnLtBSfw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=16"
+ }
+ },
+ "node_modules/@cloudflare/workers-types": {
+ "version": "4.20260511.1",
+ "resolved": "https://registry.npmjs.org/@cloudflare/workers-types/-/workers-types-4.20260511.1.tgz",
+ "integrity": "sha512-FA+si7cOq9i/gtCHhIc0XJL0l1F/ApF+m00752Aj7WZFJrj3ZulT2T8/+rT3BabMT0QEnqFEGIqCgrmqhgEfMg==",
+ "dev": true,
+ "license": "MIT OR Apache-2.0"
+ },
+ "node_modules/@cspotcode/source-map-support": {
+ "version": "0.8.1",
+ "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz",
+ "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/trace-mapping": "0.3.9"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@emnapi/runtime": {
+ "version": "1.10.0",
+ "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.10.0.tgz",
+ "integrity": "sha512-ewvYlk86xUoGI0zQRNq/mC+16R1QeDlKQy21Ki3oSYXNgLb45GV1P6A0M+/s6nyCuNDqe5VpaY84BzXGwVbwFA==",
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "tslib": "^2.4.0"
+ }
+ },
+ "node_modules/@esbuild/aix-ppc64": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.3.tgz",
+ "integrity": "sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "aix"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/android-arm": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.3.tgz",
+ "integrity": "sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/android-arm64": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.3.tgz",
+ "integrity": "sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/android-x64": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.3.tgz",
+ "integrity": "sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/darwin-arm64": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.3.tgz",
+ "integrity": "sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/darwin-x64": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.3.tgz",
+ "integrity": "sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/freebsd-arm64": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.3.tgz",
+ "integrity": "sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/freebsd-x64": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.3.tgz",
+ "integrity": "sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-arm": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.3.tgz",
+ "integrity": "sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-arm64": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.3.tgz",
+ "integrity": "sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-ia32": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.3.tgz",
+ "integrity": "sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-loong64": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.3.tgz",
+ "integrity": "sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-mips64el": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.3.tgz",
+ "integrity": "sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw==",
+ "cpu": [
+ "mips64el"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-ppc64": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.3.tgz",
+ "integrity": "sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-riscv64": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.3.tgz",
+ "integrity": "sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-s390x": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.3.tgz",
+ "integrity": "sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-x64": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.3.tgz",
+ "integrity": "sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/netbsd-arm64": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.3.tgz",
+ "integrity": "sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "netbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/netbsd-x64": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.3.tgz",
+ "integrity": "sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "netbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/openbsd-arm64": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.3.tgz",
+ "integrity": "sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/openbsd-x64": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.3.tgz",
+ "integrity": "sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/openharmony-arm64": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.3.tgz",
+ "integrity": "sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openharmony"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/sunos-x64": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.3.tgz",
+ "integrity": "sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "sunos"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/win32-arm64": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.3.tgz",
+ "integrity": "sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/win32-ia32": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.3.tgz",
+ "integrity": "sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/win32-x64": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.3.tgz",
+ "integrity": "sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@img/colour": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@img/colour/-/colour-1.1.0.tgz",
+ "integrity": "sha512-Td76q7j57o/tLVdgS746cYARfSyxk8iEfRxewL9h4OMzYhbW4TAcppl0mT4eyqXddh6L/jwoM75mo7ixa/pCeQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@img/sharp-darwin-arm64": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.34.5.tgz",
+ "integrity": "sha512-imtQ3WMJXbMY4fxb/Ndp6HBTNVtWCUI0WdobyheGf5+ad6xX8VIDO8u2xE4qc/fr08CKG/7dDseFtn6M6g/r3w==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-darwin-arm64": "1.2.4"
+ }
+ },
+ "node_modules/@img/sharp-darwin-x64": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.34.5.tgz",
+ "integrity": "sha512-YNEFAF/4KQ/PeW0N+r+aVVsoIY0/qxxikF2SWdp+NRkmMB7y9LBZAVqQ4yhGCm/H3H270OSykqmQMKLBhBJDEw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-darwin-x64": "1.2.4"
+ }
+ },
+ "node_modules/@img/sharp-libvips-darwin-arm64": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.2.4.tgz",
+ "integrity": "sha512-zqjjo7RatFfFoP0MkQ51jfuFZBnVE2pRiaydKJ1G/rHZvnsrHAOcQALIi9sA5co5xenQdTugCvtb1cuf78Vf4g==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-libvips-darwin-x64": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.2.4.tgz",
+ "integrity": "sha512-1IOd5xfVhlGwX+zXv2N93k0yMONvUlANylbJw1eTah8K/Jtpi15KC+WSiaX/nBmbm2HxRM1gZ0nSdjSsrZbGKg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-libvips-linux-arm": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.2.4.tgz",
+ "integrity": "sha512-bFI7xcKFELdiNCVov8e44Ia4u2byA+l3XtsAj+Q8tfCwO6BQ8iDojYdvoPMqsKDkuoOo+X6HZA0s0q11ANMQ8A==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-libvips-linux-arm64": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.2.4.tgz",
+ "integrity": "sha512-excjX8DfsIcJ10x1Kzr4RcWe1edC9PquDRRPx3YVCvQv+U5p7Yin2s32ftzikXojb1PIFc/9Mt28/y+iRklkrw==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-libvips-linux-ppc64": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-ppc64/-/sharp-libvips-linux-ppc64-1.2.4.tgz",
+ "integrity": "sha512-FMuvGijLDYG6lW+b/UvyilUWu5Ayu+3r2d1S8notiGCIyYU/76eig1UfMmkZ7vwgOrzKzlQbFSuQfgm7GYUPpA==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-libvips-linux-riscv64": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-riscv64/-/sharp-libvips-linux-riscv64-1.2.4.tgz",
+ "integrity": "sha512-oVDbcR4zUC0ce82teubSm+x6ETixtKZBh/qbREIOcI3cULzDyb18Sr/Wcyx7NRQeQzOiHTNbZFF1UwPS2scyGA==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-libvips-linux-s390x": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.2.4.tgz",
+ "integrity": "sha512-qmp9VrzgPgMoGZyPvrQHqk02uyjA0/QrTO26Tqk6l4ZV0MPWIW6LTkqOIov+J1yEu7MbFQaDpwdwJKhbJvuRxQ==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-libvips-linux-x64": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.2.4.tgz",
+ "integrity": "sha512-tJxiiLsmHc9Ax1bz3oaOYBURTXGIRDODBqhveVHonrHJ9/+k89qbLl0bcJns+e4t4rvaNBxaEZsFtSfAdquPrw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-libvips-linuxmusl-arm64": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.2.4.tgz",
+ "integrity": "sha512-FVQHuwx1IIuNow9QAbYUzJ+En8KcVm9Lk5+uGUQJHaZmMECZmOlix9HnH7n1TRkXMS0pGxIJokIVB9SuqZGGXw==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-libvips-linuxmusl-x64": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.2.4.tgz",
+ "integrity": "sha512-+LpyBk7L44ZIXwz/VYfglaX/okxezESc6UxDSoyo2Ks6Jxc4Y7sGjpgU9s4PMgqgjj1gZCylTieNamqA1MF7Dg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-linux-arm": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm/-/sharp-linux-arm-0.34.5.tgz",
+ "integrity": "sha512-9dLqsvwtg1uuXBGZKsxem9595+ujv0sJ6Vi8wcTANSFpwV/GONat5eCkzQo/1O6zRIkh0m/8+5BjrRr7jDUSZw==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linux-arm": "1.2.4"
+ }
+ },
+ "node_modules/@img/sharp-linux-arm64": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.34.5.tgz",
+ "integrity": "sha512-bKQzaJRY/bkPOXyKx5EVup7qkaojECG6NLYswgktOZjaXecSAeCWiZwwiFf3/Y+O1HrauiE3FVsGxFg8c24rZg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linux-arm64": "1.2.4"
+ }
+ },
+ "node_modules/@img/sharp-linux-ppc64": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linux-ppc64/-/sharp-linux-ppc64-0.34.5.tgz",
+ "integrity": "sha512-7zznwNaqW6YtsfrGGDA6BRkISKAAE1Jo0QdpNYXNMHu2+0dTrPflTLNkpc8l7MUP5M16ZJcUvysVWWrMefZquA==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linux-ppc64": "1.2.4"
+ }
+ },
+ "node_modules/@img/sharp-linux-riscv64": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linux-riscv64/-/sharp-linux-riscv64-0.34.5.tgz",
+ "integrity": "sha512-51gJuLPTKa7piYPaVs8GmByo7/U7/7TZOq+cnXJIHZKavIRHAP77e3N2HEl3dgiqdD/w0yUfiJnII77PuDDFdw==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linux-riscv64": "1.2.4"
+ }
+ },
+ "node_modules/@img/sharp-linux-s390x": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.34.5.tgz",
+ "integrity": "sha512-nQtCk0PdKfho3eC5MrbQoigJ2gd1CgddUMkabUj+rBevs8tZ2cULOx46E7oyX+04WGfABgIwmMC0VqieTiR4jg==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linux-s390x": "1.2.4"
+ }
+ },
+ "node_modules/@img/sharp-linux-x64": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.34.5.tgz",
+ "integrity": "sha512-MEzd8HPKxVxVenwAa+JRPwEC7QFjoPWuS5NZnBt6B3pu7EG2Ge0id1oLHZpPJdn3OQK+BQDiw9zStiHBTJQQQQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linux-x64": "1.2.4"
+ }
+ },
+ "node_modules/@img/sharp-linuxmusl-arm64": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.34.5.tgz",
+ "integrity": "sha512-fprJR6GtRsMt6Kyfq44IsChVZeGN97gTD331weR1ex1c1rypDEABN6Tm2xa1wE6lYb5DdEnk03NZPqA7Id21yg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linuxmusl-arm64": "1.2.4"
+ }
+ },
+ "node_modules/@img/sharp-linuxmusl-x64": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.34.5.tgz",
+ "integrity": "sha512-Jg8wNT1MUzIvhBFxViqrEhWDGzqymo3sV7z7ZsaWbZNDLXRJZoRGrjulp60YYtV4wfY8VIKcWidjojlLcWrd8Q==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-libvips-linuxmusl-x64": "1.2.4"
+ }
+ },
+ "node_modules/@img/sharp-wasm32": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-wasm32/-/sharp-wasm32-0.34.5.tgz",
+ "integrity": "sha512-OdWTEiVkY2PHwqkbBI8frFxQQFekHaSSkUIJkwzclWZe64O1X4UlUjqqqLaPbUpMOQk6FBu/HtlGXNblIs0huw==",
+ "cpu": [
+ "wasm32"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 AND LGPL-3.0-or-later AND MIT",
+ "optional": true,
+ "dependencies": {
+ "@emnapi/runtime": "^1.7.0"
+ },
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-win32-arm64": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-win32-arm64/-/sharp-win32-arm64-0.34.5.tgz",
+ "integrity": "sha512-WQ3AgWCWYSb2yt+IG8mnC6Jdk9Whs7O0gxphblsLvdhSpSTtmu69ZG1Gkb6NuvxsNACwiPV6cNSZNzt0KPsw7g==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 AND LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-win32-ia32": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.34.5.tgz",
+ "integrity": "sha512-FV9m/7NmeCmSHDD5j4+4pNI8Cp3aW+JvLoXcTUo0IqyjSfAZJ8dIUmijx1qaJsIiU+Hosw6xM5KijAWRJCSgNg==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 AND LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@img/sharp-win32-x64": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/@img/sharp-win32-x64/-/sharp-win32-x64-0.34.5.tgz",
+ "integrity": "sha512-+29YMsqY2/9eFEiW93eqWnuLcWcufowXewwSNIT6UwZdUUCrM3oFjMWH/Z6/TMmb4hlFenmfAVbpWeup2jryCw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 AND LGPL-3.0-or-later",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ }
+ },
+ "node_modules/@jridgewell/resolve-uri": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
+ "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/sourcemap-codec": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz",
+ "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jridgewell/trace-mapping": {
+ "version": "0.3.9",
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz",
+ "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/resolve-uri": "^3.0.3",
+ "@jridgewell/sourcemap-codec": "^1.4.10"
+ }
+ },
+ "node_modules/@poppinss/colors": {
+ "version": "4.1.6",
+ "resolved": "https://registry.npmjs.org/@poppinss/colors/-/colors-4.1.6.tgz",
+ "integrity": "sha512-H9xkIdFswbS8n1d6vmRd8+c10t2Qe+rZITbbDHHkQixH5+2x1FDGmi/0K+WgWiqQFKPSlIYB7jlH6Kpfn6Fleg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "kleur": "^4.1.5"
+ }
+ },
+ "node_modules/@poppinss/dumper": {
+ "version": "0.6.5",
+ "resolved": "https://registry.npmjs.org/@poppinss/dumper/-/dumper-0.6.5.tgz",
+ "integrity": "sha512-NBdYIb90J7LfOI32dOewKI1r7wnkiH6m920puQ3qHUeZkxNkQiFnXVWoE6YtFSv6QOiPPf7ys6i+HWWecDz7sw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@poppinss/colors": "^4.1.5",
+ "@sindresorhus/is": "^7.0.2",
+ "supports-color": "^10.0.0"
+ }
+ },
+ "node_modules/@poppinss/exception": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/@poppinss/exception/-/exception-1.2.3.tgz",
+ "integrity": "sha512-dCED+QRChTVatE9ibtoaxc+WkdzOSjYTKi/+uacHWIsfodVfpsueo3+DKpgU5Px8qXjgmXkSvhXvSCz3fnP9lw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@sindresorhus/is": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-7.2.0.tgz",
+ "integrity": "sha512-P1Cz1dWaFfR4IR+U13mqqiGsLFf1KbayybWwdd2vfctdV6hDpUkgCY0nKOLLTMSoRd/jJNjtbqzf13K8DCCXQw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sindresorhus/is?sponsor=1"
+ }
+ },
+ "node_modules/@speed-highlight/core": {
+ "version": "1.2.15",
+ "resolved": "https://registry.npmjs.org/@speed-highlight/core/-/core-1.2.15.tgz",
+ "integrity": "sha512-BMq1K3DsElxDWawkX6eLg9+CKJrTVGCBAWVuHXVUV2u0s2711qiChLSId6ikYPfxhdYocLNt3wWwSvDiTvFabw==",
+ "dev": true,
+ "license": "CC0-1.0"
+ },
+ "node_modules/blake3-wasm": {
+ "version": "2.1.5",
+ "resolved": "https://registry.npmjs.org/blake3-wasm/-/blake3-wasm-2.1.5.tgz",
+ "integrity": "sha512-F1+K8EbfOZE49dtoPtmxUQrpXaBIl3ICvasLh+nJta0xkz+9kF/7uet9fLnwKqhDrmj6g+6K3Tw9yQPUg2ka5g==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/cookie": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.1.1.tgz",
+ "integrity": "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
+ }
+ },
+ "node_modules/detect-libc": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz",
+ "integrity": "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/error-stack-parser-es": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/error-stack-parser-es/-/error-stack-parser-es-1.0.5.tgz",
+ "integrity": "sha512-5qucVt2XcuGMcEGgWI7i+yZpmpByQ8J1lHhcL7PwqCwu9FPP3VUXzT4ltHe5i2z9dePwEHcDVOAfSnHsOlCXRA==",
+ "dev": true,
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/sponsors/antfu"
+ }
+ },
+ "node_modules/esbuild": {
+ "version": "0.27.3",
+ "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.3.tgz",
+ "integrity": "sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg==",
+ "dev": true,
+ "hasInstallScript": true,
+ "license": "MIT",
+ "bin": {
+ "esbuild": "bin/esbuild"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "optionalDependencies": {
+ "@esbuild/aix-ppc64": "0.27.3",
+ "@esbuild/android-arm": "0.27.3",
+ "@esbuild/android-arm64": "0.27.3",
+ "@esbuild/android-x64": "0.27.3",
+ "@esbuild/darwin-arm64": "0.27.3",
+ "@esbuild/darwin-x64": "0.27.3",
+ "@esbuild/freebsd-arm64": "0.27.3",
+ "@esbuild/freebsd-x64": "0.27.3",
+ "@esbuild/linux-arm": "0.27.3",
+ "@esbuild/linux-arm64": "0.27.3",
+ "@esbuild/linux-ia32": "0.27.3",
+ "@esbuild/linux-loong64": "0.27.3",
+ "@esbuild/linux-mips64el": "0.27.3",
+ "@esbuild/linux-ppc64": "0.27.3",
+ "@esbuild/linux-riscv64": "0.27.3",
+ "@esbuild/linux-s390x": "0.27.3",
+ "@esbuild/linux-x64": "0.27.3",
+ "@esbuild/netbsd-arm64": "0.27.3",
+ "@esbuild/netbsd-x64": "0.27.3",
+ "@esbuild/openbsd-arm64": "0.27.3",
+ "@esbuild/openbsd-x64": "0.27.3",
+ "@esbuild/openharmony-arm64": "0.27.3",
+ "@esbuild/sunos-x64": "0.27.3",
+ "@esbuild/win32-arm64": "0.27.3",
+ "@esbuild/win32-ia32": "0.27.3",
+ "@esbuild/win32-x64": "0.27.3"
+ }
+ },
+ "node_modules/fsevents": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
+ "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+ }
+ },
+ "node_modules/kleur": {
+ "version": "4.1.5",
+ "resolved": "https://registry.npmjs.org/kleur/-/kleur-4.1.5.tgz",
+ "integrity": "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/miniflare": {
+ "version": "4.20260508.0",
+ "resolved": "https://registry.npmjs.org/miniflare/-/miniflare-4.20260508.0.tgz",
+ "integrity": "sha512-h3aG+PA8jEH76V4ZtBAbs3g7kjMfHJUF8hPvxeeajLTKwir+G+dqfBODg5yF9MT29LqrZKCRQRqzfHPWX4kCIg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@cspotcode/source-map-support": "0.8.1",
+ "sharp": "^0.34.5",
+ "undici": "7.24.8",
+ "workerd": "1.20260508.1",
+ "ws": "8.18.0",
+ "youch": "4.1.0-beta.10"
+ },
+ "bin": {
+ "miniflare": "bootstrap.js"
+ },
+ "engines": {
+ "node": ">=22.0.0"
+ }
+ },
+ "node_modules/path-to-regexp": {
+ "version": "6.3.0",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.3.0.tgz",
+ "integrity": "sha512-Yhpw4T9C6hPpgPeA28us07OJeqZ5EzQTkbfwuhsUg0c237RomFoETJgmp2sa3F/41gfLE6G5cqcYwznmeEeOlQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/pathe": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz",
+ "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/semver": {
+ "version": "7.8.0",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.8.0.tgz",
+ "integrity": "sha512-AcM7dV/5ul4EekoQ29Agm5vri8JNqRyj39o0qpX6vDF2GZrtutZl5RwgD1XnZjiTAfncsJhMI48QQH3sN87YNA==",
+ "dev": true,
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/sharp": {
+ "version": "0.34.5",
+ "resolved": "https://registry.npmjs.org/sharp/-/sharp-0.34.5.tgz",
+ "integrity": "sha512-Ou9I5Ft9WNcCbXrU9cMgPBcCK8LiwLqcbywW3t4oDV37n1pzpuNLsYiAV8eODnjbtQlSDwZ2cUEeQz4E54Hltg==",
+ "dev": true,
+ "hasInstallScript": true,
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@img/colour": "^1.0.0",
+ "detect-libc": "^2.1.2",
+ "semver": "^7.7.3"
+ },
+ "engines": {
+ "node": "^18.17.0 || ^20.3.0 || >=21.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/libvips"
+ },
+ "optionalDependencies": {
+ "@img/sharp-darwin-arm64": "0.34.5",
+ "@img/sharp-darwin-x64": "0.34.5",
+ "@img/sharp-libvips-darwin-arm64": "1.2.4",
+ "@img/sharp-libvips-darwin-x64": "1.2.4",
+ "@img/sharp-libvips-linux-arm": "1.2.4",
+ "@img/sharp-libvips-linux-arm64": "1.2.4",
+ "@img/sharp-libvips-linux-ppc64": "1.2.4",
+ "@img/sharp-libvips-linux-riscv64": "1.2.4",
+ "@img/sharp-libvips-linux-s390x": "1.2.4",
+ "@img/sharp-libvips-linux-x64": "1.2.4",
+ "@img/sharp-libvips-linuxmusl-arm64": "1.2.4",
+ "@img/sharp-libvips-linuxmusl-x64": "1.2.4",
+ "@img/sharp-linux-arm": "0.34.5",
+ "@img/sharp-linux-arm64": "0.34.5",
+ "@img/sharp-linux-ppc64": "0.34.5",
+ "@img/sharp-linux-riscv64": "0.34.5",
+ "@img/sharp-linux-s390x": "0.34.5",
+ "@img/sharp-linux-x64": "0.34.5",
+ "@img/sharp-linuxmusl-arm64": "0.34.5",
+ "@img/sharp-linuxmusl-x64": "0.34.5",
+ "@img/sharp-wasm32": "0.34.5",
+ "@img/sharp-win32-arm64": "0.34.5",
+ "@img/sharp-win32-ia32": "0.34.5",
+ "@img/sharp-win32-x64": "0.34.5"
+ }
+ },
+ "node_modules/supports-color": {
+ "version": "10.2.2",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-10.2.2.tgz",
+ "integrity": "sha512-SS+jx45GF1QjgEXQx4NJZV9ImqmO2NPz5FNsIHrsDjh2YsHnawpan7SNQ1o8NuhrbHZy9AZhIoCUiCeaW/C80g==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/supports-color?sponsor=1"
+ }
+ },
+ "node_modules/tslib": {
+ "version": "2.8.1",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz",
+ "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==",
+ "dev": true,
+ "license": "0BSD",
+ "optional": true
+ },
+ "node_modules/undici": {
+ "version": "7.24.8",
+ "resolved": "https://registry.npmjs.org/undici/-/undici-7.24.8.tgz",
+ "integrity": "sha512-6KQ/+QxK49Z/p3HO6E5ZCZWNnCasyZLa5ExaVYyvPxUwKtbCPMKELJOqh7EqOle0t9cH/7d2TaaTRRa6Nhs4YQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=20.18.1"
+ }
+ },
+ "node_modules/unenv": {
+ "version": "2.0.0-rc.24",
+ "resolved": "https://registry.npmjs.org/unenv/-/unenv-2.0.0-rc.24.tgz",
+ "integrity": "sha512-i7qRCmY42zmCwnYlh9H2SvLEypEFGye5iRmEMKjcGi7zk9UquigRjFtTLz0TYqr0ZGLZhaMHl/foy1bZR+Cwlw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "pathe": "^2.0.3"
+ }
+ },
+ "node_modules/workerd": {
+ "version": "1.20260508.1",
+ "resolved": "https://registry.npmjs.org/workerd/-/workerd-1.20260508.1.tgz",
+ "integrity": "sha512-VlnjyH3AjVddpSK7J54nsCVgf8i2733pl8GjKttfNi7vN/hEjjAk20d2b1nDToOLKvRQpTewRnVkqaaeGHCaAw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "license": "Apache-2.0",
+ "bin": {
+ "workerd": "bin/workerd"
+ },
+ "engines": {
+ "node": ">=16"
+ },
+ "optionalDependencies": {
+ "@cloudflare/workerd-darwin-64": "1.20260508.1",
+ "@cloudflare/workerd-darwin-arm64": "1.20260508.1",
+ "@cloudflare/workerd-linux-64": "1.20260508.1",
+ "@cloudflare/workerd-linux-arm64": "1.20260508.1",
+ "@cloudflare/workerd-windows-64": "1.20260508.1"
+ }
+ },
+ "node_modules/wrangler": {
+ "version": "4.90.1",
+ "resolved": "https://registry.npmjs.org/wrangler/-/wrangler-4.90.1.tgz",
+ "integrity": "sha512-u2KrieKSMfRM0toTst/CfDtcRraeoVjmcExcMWgILM/ytq3qcDhuOAULoZSyPHzma43lfLJy1BC544drFyqe1A==",
+ "dev": true,
+ "license": "MIT OR Apache-2.0",
+ "dependencies": {
+ "@cloudflare/kv-asset-handler": "0.5.0",
+ "@cloudflare/unenv-preset": "2.16.1",
+ "blake3-wasm": "2.1.5",
+ "esbuild": "0.27.3",
+ "miniflare": "4.20260508.0",
+ "path-to-regexp": "6.3.0",
+ "unenv": "2.0.0-rc.24",
+ "workerd": "1.20260508.1"
+ },
+ "bin": {
+ "wrangler": "bin/wrangler.js",
+ "wrangler2": "bin/wrangler.js"
+ },
+ "engines": {
+ "node": ">=22.0.0"
+ },
+ "optionalDependencies": {
+ "fsevents": "~2.3.2"
+ },
+ "peerDependencies": {
+ "@cloudflare/workers-types": "^4.20260508.1"
+ },
+ "peerDependenciesMeta": {
+ "@cloudflare/workers-types": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/ws": {
+ "version": "8.18.0",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz",
+ "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=10.0.0"
+ },
+ "peerDependencies": {
+ "bufferutil": "^4.0.1",
+ "utf-8-validate": ">=5.0.2"
+ },
+ "peerDependenciesMeta": {
+ "bufferutil": {
+ "optional": true
+ },
+ "utf-8-validate": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/youch": {
+ "version": "4.1.0-beta.10",
+ "resolved": "https://registry.npmjs.org/youch/-/youch-4.1.0-beta.10.tgz",
+ "integrity": "sha512-rLfVLB4FgQneDr0dv1oddCVZmKjcJ6yX6mS4pU82Mq/Dt9a3cLZQ62pDBL4AUO+uVrCvtWz3ZFUL2HFAFJ/BXQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@poppinss/colors": "^4.1.5",
+ "@poppinss/dumper": "^0.6.4",
+ "@speed-highlight/core": "^1.2.7",
+ "cookie": "^1.0.2",
+ "youch-core": "^0.3.3"
+ }
+ },
+ "node_modules/youch-core": {
+ "version": "0.3.3",
+ "resolved": "https://registry.npmjs.org/youch-core/-/youch-core-0.3.3.tgz",
+ "integrity": "sha512-ho7XuGjLaJ2hWHoK8yFnsUGy2Y5uDpqSTq1FkHLK4/oqKtyUU1AFbOOxY4IpC9f0fTLjwYbslUz0Po5BpD1wrA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@poppinss/exception": "^1.2.2",
+ "error-stack-parser-es": "^1.0.5"
+ }
+ }
+ }
+}
diff --git a/app_python/edge-api/package.json b/app_python/edge-api/package.json
new file mode 100644
index 0000000000..c8164e658c
--- /dev/null
+++ b/app_python/edge-api/package.json
@@ -0,0 +1,24 @@
+{
+ "name": "edge-api",
+ "version": "1.0.0",
+ "description": "Cloudflare Workers edge API deployment",
+ "main": "src/index.ts",
+ "scripts": {
+ "dev": "wrangler dev",
+ "deploy": "wrangler deploy",
+ "start": "wrangler dev",
+ "test": "echo \"no tests yet\""
+ },
+ "keywords": [
+ "cloudflare",
+ "workers",
+ "edge",
+ "devops"
+ ],
+ "author": "Diana Yakupova",
+ "license": "MIT",
+ "devDependencies": {
+ "@cloudflare/workers-types": "^4.20240815.1",
+ "wrangler": "^4.90.1"
+ }
+}
diff --git a/app_python/edge-api/src/index.ts b/app_python/edge-api/src/index.ts
new file mode 100644
index 0000000000..c978b6bf7f
--- /dev/null
+++ b/app_python/edge-api/src/index.ts
@@ -0,0 +1,131 @@
+export interface Env {
+ APP_NAME: string;
+ COURSE_NAME: string;
+ VERSION: string;
+ API_TOKEN?: string;
+ ADMIN_EMAIL?: string;
+ SETTINGS: KVNamespace;
+}
+
+export default {
+ async fetch(request: Request, env: Env): Promise<Response> {
+ const url = new URL(request.url);
+
+ console.log("incoming request", {
+ method: request.method,
+ path: url.pathname,
+ colo: request.cf?.colo,
+ country: request.cf?.country,
+ timestamp: new Date().toISOString(),
+ });
+
+ // Health check endpoint
+ if (url.pathname === "/health") {
+ return Response.json(
+ {
+ status: "ok",
+ timestamp: new Date().toISOString(),
+ colo: request.cf?.colo,
+ },
+ { status: 200 },
+ );
+ }
+
+ // Home endpoint
+ if (url.pathname === "/") {
+ return Response.json(
+ {
+ app: env.APP_NAME,
+ version: env.VERSION,
+ course: env.COURSE_NAME,
+ message: "Hello from Cloudflare Workers Edge",
+ timestamp: new Date().toISOString(),
+ },
+ { status: 200 },
+ );
+ }
+
+ // App info endpoint
+ if (url.pathname === "/app-info") {
+ return Response.json(
+ {
+ app: env.APP_NAME,
+ version: env.VERSION,
+ course: env.COURSE_NAME,
+ environment: "production",
+ runtime: "cloudflare-workers",
+ timestamp: new Date().toISOString(),
+ },
+ { status: 200 },
+ );
+ }
+
+ // Edge metadata endpoint
+ if (url.pathname === "/edge") {
+ return Response.json(
+ {
+ colo: request.cf?.colo,
+ country: request.cf?.country,
+ city: request.cf?.city,
+ asn: request.cf?.asn,
+ httpProtocol: request.cf?.httpProtocol,
+ tlsVersion: request.cf?.tlsVersion,
+ continent: request.cf?.continent,
+ latitude: request.cf?.latitude,
+ longitude: request.cf?.longitude,
+ },
+ { status: 200 },
+ );
+ }
+
+ // Admin endpoint (with secret check)
+ if (url.pathname === "/admin") {
+ return Response.json(
+ {
+ admin: env.ADMIN_EMAIL || "admin@example.com",
+ hasToken: !!env.API_TOKEN,
+ message: "Admin endpoint - protected by secret",
+ timestamp: new Date().toISOString(),
+ },
+ { status: 200 },
+ );
+ }
+
+ // Persistent counter using KV
+ if (url.pathname === "/counter") {
+ try {
+ const raw = await env.SETTINGS.get("visits");
+ const visits = Number(raw ?? "0") + 1;
+ await env.SETTINGS.put("visits", String(visits));
+
+ return Response.json(
+ {
+ visits,
+ message: "Counter persisted in KV",
+ timestamp: new Date().toISOString(),
+ },
+ { status: 200 },
+ );
+ } catch (error) {
+ console.error("Counter error:", error);
+ return Response.json(
+ {
+ error: "KV not available in local development",
+ visits: 0,
+ },
+ { status: 200 },
+ );
+ }
+ }
+
+ // 404 for unknown routes
+ return Response.json(
+ {
+ error: "Not found",
+ path: url.pathname,
+ available: ["/", "/health", "/app-info", "/edge", "/counter", "/admin"],
+ },
+ { status: 404 },
+ );
+ },
+};
diff --git a/app_python/edge-api/test-local.js b/app_python/edge-api/test-local.js
new file mode 100644
index 0000000000..68513fa26b
--- /dev/null
+++ b/app_python/edge-api/test-local.js
@@ -0,0 +1,31 @@
+// Simple local test without Wrangler
+const url = new URL("http://localhost:8787/");
+
+const mockRequest = {
+ url: "http://localhost:8787/health",
+ method: "GET",
+ cf: {
+ colo: "SJC",
+ country: "US",
+ city: "San Jose"
+ }
+};
+
+const env = {
+ APP_NAME: "edge-api",
+ COURSE_NAME: "DevOps-Core",
+ VERSION: "1.0.0",
+ API_TOKEN: "test-token",
+ ADMIN_EMAIL: "admin@example.com",
+ SETTINGS: {
+ get: async () => null,
+ put: async () => {}
+ }
+};
+
+console.log("✓ Project structure created successfully");
+console.log("✓ Files created:");
+console.log(" - src/index.ts (Worker code)");
+console.log(" - wrangler.jsonc (Configuration)");
+console.log(" - package.json (Dependencies)");
+console.log(" - tsconfig.json (TypeScript config)");
diff --git a/app_python/edge-api/tsconfig.json b/app_python/edge-api/tsconfig.json
new file mode 100644
index 0000000000..f4c57bdb4a
--- /dev/null
+++ b/app_python/edge-api/tsconfig.json
@@ -0,0 +1,25 @@
+{
+ "compilerOptions": {
+ "target": "ES2023",
+ "module": "ESNext",
+ "lib": ["ES2023"],
+ "jsx": "react-jsx",
+ "declaration": true,
+ "declarationMap": true,
+ "sourceMap": true,
+ "outDir": "./dist",
+ "rootDir": "./src",
+ "composite": true,
+ "resolveJsonModule": true,
+ "allowJs": true,
+ "strict": true,
+ "esModuleInterop": true,
+ "skipLibCheck": true,
+ "forceConsistentCasingInFileNames": true,
+ "noImplicitAny": true,
+ "strictNullChecks": true,
+ "strictFunctionTypes": true
+ },
+ "include": ["src/**/*"],
+ "exclude": ["node_modules", "dist", "test"]
+}
diff --git a/app_python/edge-api/wrangler.jsonc b/app_python/edge-api/wrangler.jsonc
new file mode 100644
index 0000000000..e7115faabf
--- /dev/null
+++ b/app_python/edge-api/wrangler.jsonc
@@ -0,0 +1,16 @@
+{
+ "name": "edge-api",
+ "main": "src/index.ts",
+ "compatibility_date": "2024-08-15",
+ "vars": {
+ "APP_NAME": "edge-api",
+ "COURSE_NAME": "DevOps-Core",
+ "VERSION": "1.0.0"
+ },
+ "kv_namespaces": [
+ {
+ "binding": "SETTINGS",
+ "id": "5fc4b21e9d3d43dba317ea7cd74544f5"
+ }
+ ]
+}
diff --git a/app_python/k8s/argocd/application-dev.yaml b/app_python/k8s/argocd/application-dev.yaml
new file mode 100644
index 0000000000..358971b02e
--- /dev/null
+++ b/app_python/k8s/argocd/application-dev.yaml
@@ -0,0 +1,23 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: python-app-dev
+ namespace: argocd
+spec:
+ project: default
+ source:
+ repoURL: https://github.com/versceana/DevOps-Core-Course
+ targetRevision: lab13
+ path: app_python/k8s/my-python-app
+ helm:
+ valueFiles:
+ - values-dev.yaml
+ destination:
+ server: https://kubernetes.default.svc
+ namespace: dev
+ syncPolicy:
+ automated:
+ prune: true
+ selfHeal: true
+ syncOptions:
+ - CreateNamespace=true
diff --git a/app_python/k8s/argocd/application-prod.yaml b/app_python/k8s/argocd/application-prod.yaml
new file mode 100644
index 0000000000..3290ccbd75
--- /dev/null
+++ b/app_python/k8s/argocd/application-prod.yaml
@@ -0,0 +1,20 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: python-app-prod
+ namespace: argocd
+spec:
+ project: default
+ source:
+ repoURL: https://github.com/versceana/DevOps-Core-Course
+ targetRevision: lab13
+ path: app_python/k8s/my-python-app
+ helm:
+ valueFiles:
+ - values-prod.yaml
+ destination:
+ server: https://kubernetes.default.svc
+ namespace: prod
+ syncPolicy:
+ syncOptions:
+ - CreateNamespace=true
diff --git a/app_python/k8s/argocd/application.yaml b/app_python/k8s/argocd/application.yaml
new file mode 100644
index 0000000000..c73e93c486
--- /dev/null
+++ b/app_python/k8s/argocd/application.yaml
@@ -0,0 +1,20 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: python-app
+ namespace: argocd
+spec:
+ project: default
+ source:
+ repoURL: https://github.com/versceana/DevOps-Core-Course
+ targetRevision: lab13
+ path: app_python/k8s/my-python-app
+ helm:
+ valueFiles:
+ - values.yaml
+ destination:
+ server: https://kubernetes.default.svc
+ namespace: default
+ syncPolicy:
+ syncOptions:
+ - CreateNamespace=true
diff --git a/app_python/k8s/deployment.yml b/app_python/k8s/deployment.yml
new file mode 100644
index 0000000000..1020c5f4c2
--- /dev/null
+++ b/app_python/k8s/deployment.yml
@@ -0,0 +1,44 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: devops-info-service
+ labels:
+ app: devops-info-service
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: devops-info-service
+ template:
+ metadata:
+ labels:
+ app: devops-info-service
+ spec:
+ containers:
+ - name: app
+ image: versceana/devops-info-service:latest
+ imagePullPolicy: IfNotPresent
+ ports:
+ - containerPort: 5000
+ env:
+ - name: COUNTER_FILE
+ value: "/data/visits"
+ resources:
+ requests:
+ memory: "128Mi"
+ cpu: "100m"
+ limits:
+ memory: "256Mi"
+ cpu: "200m"
+ livenessProbe:
+ httpGet:
+ path: /health
+ port: 5000
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ readinessProbe:
+ httpGet:
+ path: /health
+ port: 5000
+ initialDelaySeconds: 5
+ periodSeconds: 3
diff --git a/app_python/k8s/init-download.yaml b/app_python/k8s/init-download.yaml
new file mode 100644
index 0000000000..fab7f15f4f
--- /dev/null
+++ b/app_python/k8s/init-download.yaml
@@ -0,0 +1,28 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: init-download-demo
+ namespace: default
+spec:
+ initContainers:
+ - name: init-download
+ image: busybox:1.36
+ command:
+ [
+ "sh",
+ "-c",
+ 'wget -O /work-dir/index.html https://www.example.com && echo "Downloaded successfully"',
+ ]
+ volumeMounts:
+ - name: workdir
+ mountPath: /work-dir
+ containers:
+ - name: main-app
+ image: busybox:1.36
+ command: ["sh", "-c", 'echo "Waiting..."; sleep 3600']
+ volumeMounts:
+ - name: workdir
+ mountPath: /data
+ volumes:
+ - name: workdir
+ emptyDir: {}
diff --git a/app_python/k8s/init-wait-for-service.yaml b/app_python/k8s/init-wait-for-service.yaml
new file mode 100644
index 0000000000..a94940226a
--- /dev/null
+++ b/app_python/k8s/init-wait-for-service.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: init-wait-service-demo
+ namespace: default
+spec:
+ initContainers:
+ - name: wait-for-service
+ image: busybox:1.36
+ command:
+ [
+ "sh",
+ "-c",
+ 'echo "Waiting for monitoring-grafana service..."; until wget -q -O- http://monitoring-grafana.monitoring:80 > /dev/null 2>&1; do echo "Service not ready, retrying..."; sleep 2; done; echo "Service is ready!"',
+ ]
+ containers:
+ - name: main-app
+ image: busybox:1.36
+ command: ["sh", "-c", 'echo "Main container started! Service dependency satisfied."; sleep 3600']
+ restartPolicy: Never
diff --git a/app_python/k8s/my-python-app/.helmignore b/app_python/k8s/my-python-app/.helmignore
new file mode 100644
index 0000000000..0e8a0eb36f
--- /dev/null
+++ b/app_python/k8s/my-python-app/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/app_python/k8s/my-python-app/Chart.yaml b/app_python/k8s/my-python-app/Chart.yaml
new file mode 100644
index 0000000000..ca59e2993a
--- /dev/null
+++ b/app_python/k8s/my-python-app/Chart.yaml
@@ -0,0 +1,9 @@
+apiVersion: v2
+name: my-python-app
+description: A Helm chart for DevOps info service (Flask)
+type: application
+version: 0.1.0
+appVersion: "1.0.0"
+maintainers:
+ - name: Diana Yakupova
+ email: diana@example.com
diff --git a/app_python/k8s/my-python-app/files/config.json b/app_python/k8s/my-python-app/files/config.json
new file mode 100644
index 0000000000..5518153907
--- /dev/null
+++ b/app_python/k8s/my-python-app/files/config.json
@@ -0,0 +1,8 @@
+{
+ "app_name": "devops-info-service",
+ "environment": "production",
+ "features": {
+ "visits_counter": true,
+ "debug_mode": false
+ }
+}
diff --git a/app_python/k8s/my-python-app/rollout-bluegreen.yaml b/app_python/k8s/my-python-app/rollout-bluegreen.yaml
new file mode 100644
index 0000000000..1a5e148403
--- /dev/null
+++ b/app_python/k8s/my-python-app/rollout-bluegreen.yaml
@@ -0,0 +1,26 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Rollout
+metadata:
+ name: python-app-bg
+ labels:
+ app: python-app-bg
+spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: python-app-bg
+ template:
+ metadata:
+ labels:
+ app: python-app-bg
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:latest
+ ports:
+ - containerPort: 80
+ strategy:
+ blueGreen:
+ activeService: python-app-bg-active
+ previewService: python-app-bg-preview
+ autoPromotionEnabled: false
diff --git a/app_python/k8s/my-python-app/services-bluegreen.yaml b/app_python/k8s/my-python-app/services-bluegreen.yaml
new file mode 100644
index 0000000000..28a5bf1a2a
--- /dev/null
+++ b/app_python/k8s/my-python-app/services-bluegreen.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: python-app-bg-active
+spec:
+ selector:
+ app: python-app-bg
+ ports:
+ - port: 80
+ targetPort: 80
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: python-app-bg-preview
+spec:
+ selector:
+ app: python-app-bg
+ ports:
+ - port: 80
+ targetPort: 80
diff --git a/app_python/k8s/my-python-app/templates/_helpers.tpl b/app_python/k8s/my-python-app/templates/_helpers.tpl
new file mode 100644
index 0000000000..f111aa6da8
--- /dev/null
+++ b/app_python/k8s/my-python-app/templates/_helpers.tpl
@@ -0,0 +1,49 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "my-python-app.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+*/}}
+{{- define "my-python-app.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "my-python-app.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "my-python-app.labels" -}}
+helm.sh/chart: {{ include "my-python-app.chart" . }}
+{{ include "my-python-app.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "my-python-app.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "my-python-app.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
diff --git a/app_python/k8s/my-python-app/templates/configmap.yaml b/app_python/k8s/my-python-app/templates/configmap.yaml
new file mode 100644
index 0000000000..76fb67e0bf
--- /dev/null
+++ b/app_python/k8s/my-python-app/templates/configmap.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "my-python-app.fullname" . }}-config
+ labels:
+ {{- include "my-python-app.labels" . | nindent 4 }}
+data:
+ config.json: |-
+{{ .Files.Get "files/config.json" | indent 4 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "my-python-app.fullname" . }}-env
+ labels:
+ {{- include "my-python-app.labels" . | nindent 4 }}
+data:
+ APP_ENV: {{ .Values.environment | default "production" | quote }}
+ LOG_LEVEL: {{ .Values.logLevel | default "INFO" | quote }}
+ COUNTER_FILE: {{ .Values.counterFile | default "/data/visits" | quote }}
diff --git a/app_python/k8s/my-python-app/templates/deployment.yaml.bak b/app_python/k8s/my-python-app/templates/deployment.yaml.bak
new file mode 100644
index 0000000000..4c862cac27
--- /dev/null
+++ b/app_python/k8s/my-python-app/templates/deployment.yaml.bak
@@ -0,0 +1,37 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "my-python-app.fullname" . }}
+ labels:
+ {{- include "my-python-app.labels" . | nindent 4 }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ {{- include "my-python-app.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "my-python-app.selectorLabels" . | nindent 8 }}
+ spec:
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - containerPort: {{ .Values.service.targetPort }}
+ name: http
+ livenessProbe:
+ httpGet:
+ path: /health
+ port: {{ .Values.service.targetPort }}
+ initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+ readinessProbe:
+ httpGet:
+ path: /health
+ port: {{ .Values.service.targetPort }}
+ initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+ resources:
+ {{- toYaml .Values.resources | nindent 10 }}
diff --git a/app_python/k8s/my-python-app/templates/headless-service.yaml b/app_python/k8s/my-python-app/templates/headless-service.yaml
new file mode 100644
index 0000000000..9bf96cd590
--- /dev/null
+++ b/app_python/k8s/my-python-app/templates/headless-service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "my-python-app.fullname" . }}-headless
+ labels:
+ {{- include "my-python-app.labels" . | nindent 4 }}
+spec:
+ clusterIP: None
+ selector:
+ {{- include "my-python-app.selectorLabels" . | nindent 4 }}
+ ports:
+ - name: http
+ port: {{ .Values.service.port }}
+ targetPort: {{ .Values.service.targetPort }}
diff --git a/app_python/k8s/my-python-app/templates/rollout.yaml.bak b/app_python/k8s/my-python-app/templates/rollout.yaml.bak
new file mode 100644
index 0000000000..aeb3186024
--- /dev/null
+++ b/app_python/k8s/my-python-app/templates/rollout.yaml.bak
@@ -0,0 +1,48 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Rollout
+metadata:
+ name: {{ include "my-python-app.fullname" . }}
+ labels:
+ {{- include "my-python-app.labels" . | nindent 4 }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ {{- include "my-python-app.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "my-python-app.selectorLabels" . | nindent 8 }}
+ spec:
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - containerPort: {{ .Values.service.targetPort }}
+ name: http
+ env:
+ - name: APP_ENV
+ valueFrom:
+ configMapKeyRef:
+ name: {{ include "my-python-app.fullname" . }}-env
+ key: APP_ENV
+ - name: LOG_LEVEL
+ valueFrom:
+ configMapKeyRef:
+ name: {{ include "my-python-app.fullname" . }}-env
+ key: LOG_LEVEL
+ resources:
+ {{- toYaml .Values.resources | nindent 10 }}
+ strategy:
+ canary:
+ steps:
+ - setWeight: 20
+ - pause: {}
+ - setWeight: 40
+ - pause: { duration: 30s }
+ - setWeight: 60
+ - pause: { duration: 30s }
+ - setWeight: 80
+ - pause: { duration: 30s }
+ - setWeight: 100
diff --git a/app_python/k8s/my-python-app/templates/secrets.yaml b/app_python/k8s/my-python-app/templates/secrets.yaml
new file mode 100644
index 0000000000..d243d1eb99
--- /dev/null
+++ b/app_python/k8s/my-python-app/templates/secrets.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "my-python-app.fullname" . }}-secret
+ labels:
+ {{- include "my-python-app.labels" . | nindent 4 }}
+type: Opaque
+stringData:
+ username: {{ .Values.secrets.username | default "placeholder_user" | quote }}
+ password: {{ .Values.secrets.password | default "placeholder_pass" | quote }}
diff --git a/app_python/k8s/my-python-app/templates/service.yaml b/app_python/k8s/my-python-app/templates/service.yaml
new file mode 100644
index 0000000000..e19713b39c
--- /dev/null
+++ b/app_python/k8s/my-python-app/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "my-python-app.fullname" . }}
+ labels:
+ {{- include "my-python-app.labels" . | nindent 4 }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: {{ .Values.service.targetPort }}
+ protocol: TCP
+ name: http
+ selector:
+ {{- include "my-python-app.selectorLabels" . | nindent 4 }}
diff --git a/app_python/k8s/my-python-app/templates/statefulset.yaml b/app_python/k8s/my-python-app/templates/statefulset.yaml
new file mode 100644
index 0000000000..16484e613e
--- /dev/null
+++ b/app_python/k8s/my-python-app/templates/statefulset.yaml
@@ -0,0 +1,63 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ include "my-python-app.fullname" . }}-stateful
+ labels:
+ {{- include "my-python-app.labels" . | nindent 4 }}
+spec:
+ serviceName: {{ include "my-python-app.fullname" . }}-headless
+ replicas: {{ .Values.replicaCount }}
+ updateStrategy:
+ type: {{ .Values.statefulset.updateStrategy.type | default "RollingUpdate" }}
+ {{- if eq (.Values.statefulset.updateStrategy.type | default "RollingUpdate") "RollingUpdate" }}
+ rollingUpdate:
+ partition: {{ .Values.statefulset.updateStrategy.partition | default 0 }}
+ {{- end }}
+ selector:
+ matchLabels:
+ {{- include "my-python-app.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "my-python-app.selectorLabels" . | nindent 8 }}
+ spec:
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - containerPort: {{ .Values.service.targetPort }}
+ name: http
+ envFrom:
+ - secretRef:
+ name: {{ include "my-python-app.fullname" . }}-secret
+ - configMapRef:
+ name: {{ include "my-python-app.fullname" . }}-env
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ livenessProbe:
+ httpGet:
+ path: /health
+ port: {{ .Values.service.targetPort }}
+ initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+ readinessProbe:
+ httpGet:
+ path: /health
+ port: {{ .Values.service.targetPort }}
+ initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+ resources:
+ {{- toYaml .Values.resources | nindent 10 }}
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size }}
+ {{- if .Values.persistence.storageClass }}
+ storageClassName: {{ .Values.persistence.storageClass }}
+ {{- end }}
diff --git a/app_python/k8s/my-python-app/values-bluegreen.yaml b/app_python/k8s/my-python-app/values-bluegreen.yaml
new file mode 100644
index 0000000000..3c0aea9fae
--- /dev/null
+++ b/app_python/k8s/my-python-app/values-bluegreen.yaml
@@ -0,0 +1,30 @@
+replicaCount: 2
+
+image:
+ repository: nginx
+ tag: latest
+ pullPolicy: IfNotPresent
+
+service:
+ type: ClusterIP
+ port: 80
+ targetPort: 80
+
+resources:
+ limits:
+ cpu: 200m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 64Mi
+
+persistence:
+ enabled: false
+
+secrets:
+ username: "bg_user"
+ password: "bg_pass"
+
+environment: "bluegreen"
+logLevel: "INFO"
+counterFile: "/data/visits"
diff --git a/app_python/k8s/my-python-app/values-dev.yaml b/app_python/k8s/my-python-app/values-dev.yaml
new file mode 100644
index 0000000000..d1a66046b9
--- /dev/null
+++ b/app_python/k8s/my-python-app/values-dev.yaml
@@ -0,0 +1,15 @@
+replicaCount: 1
+image:
+ tag: latest
+resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 50m
+ memory: 64Mi
+service:
+ type: NodePort
+livenessProbe:
+ initialDelaySeconds: 5
+ periodSeconds: 10
diff --git a/app_python/k8s/my-python-app/values-prod.yaml b/app_python/k8s/my-python-app/values-prod.yaml
new file mode 100644
index 0000000000..4237229033
--- /dev/null
+++ b/app_python/k8s/my-python-app/values-prod.yaml
@@ -0,0 +1,18 @@
+replicaCount: 3
+image:
+ tag: latest
+resources:
+ limits:
+ cpu: 500m
+ memory: 512Mi
+ requests:
+ cpu: 200m
+ memory: 256Mi
+service:
+ type: LoadBalancer
+livenessProbe:
+ initialDelaySeconds: 30
+ periodSeconds: 5
+readinessProbe:
+ initialDelaySeconds: 10
+ periodSeconds: 3
diff --git a/app_python/k8s/my-python-app/values.yaml b/app_python/k8s/my-python-app/values.yaml
new file mode 100644
index 0000000000..7a472144e6
--- /dev/null
+++ b/app_python/k8s/my-python-app/values.yaml
@@ -0,0 +1,47 @@
+# Default values for my-python-app
+replicaCount: 3
+
+image:
+ repository: versceana/devops-info-service
+ tag: latest
+ pullPolicy: IfNotPresent
+
+service:
+ type: NodePort
+ port: 80
+ targetPort: 5000
+
+livenessProbe:
+ initialDelaySeconds: 10
+ periodSeconds: 5
+
+readinessProbe:
+ initialDelaySeconds: 5
+ periodSeconds: 3
+
+resources:
+ limits:
+ cpu: 200m
+ memory: 256Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+
+# Secrets configuration (use --set or external secrets in production)
+secrets:
+ username: "change_me"
+ password: "change_me"
+
+environment: "production"
+logLevel: "INFO"
+counterFile: "/data/visits"
+
+persistence:
+ enabled: true
+ size: 100Mi
+ storageClass: ""
+
+statefulset:
+ updateStrategy:
+ type: RollingUpdate
+ partition: 0
diff --git a/app_python/k8s/service.yml b/app_python/k8s/service.yml
new file mode 100644
index 0000000000..b42864cdfd
--- /dev/null
+++ b/app_python/k8s/service.yml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: devops-info-service
+spec:
+ type: NodePort
+ selector:
+ app: devops-info-service
+ ports:
+ - protocol: TCP
+ port: 80
+ targetPort: 5000
+ nodePort: 30080
diff --git a/app_python/monitoring/.gitignore b/app_python/monitoring/.gitignore
new file mode 100644
index 0000000000..4c49bd78f1
--- /dev/null
+++ b/app_python/monitoring/.gitignore
@@ -0,0 +1 @@
+.env
diff --git a/app_python/monitoring/docker-compose.yml b/app_python/monitoring/docker-compose.yml
new file mode 100644
index 0000000000..cbc56d3da0
--- /dev/null
+++ b/app_python/monitoring/docker-compose.yml
@@ -0,0 +1,175 @@
+version: "3.8"
+
+services:
+ loki:
+ image: grafana/loki:3.0.0
+ container_name: loki
+ command: -config.file=/etc/loki/config.yml
+ ports:
+ - "3100:3100"
+ volumes:
+ - ./loki/config.yml:/etc/loki/config.yml:ro
+ - loki-data:/loki
+ networks:
+ - logging
+ labels:
+ logging: "promtail"
+ app: "loki"
+ healthcheck:
+ test: ["CMD-SHELL", "wget -q -O /dev/null http://localhost:3100/ready || exit 1"]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+ start_period: 10s
+ restart: unless-stopped
+ deploy:
+ resources:
+ limits:
+ cpus: '1.0'
+ memory: 1G
+ reservations:
+ cpus: '0.5'
+ memory: 512M
+
+ promtail:
+ image: grafana/promtail:3.0.0
+ container_name: promtail
+ command: -config.file=/etc/promtail/config.yml
+ ports:
+ - "9080:9080"
+ volumes:
+ - ./promtail/config.yml:/etc/promtail/config.yml:ro
+ - /var/lib/docker/containers:/var/lib/docker/containers:ro
+ - /var/run/docker.sock:/var/run/docker.sock:ro
+ - promtail-positions:/positions
+ networks:
+ - logging
+ labels:
+ logging: "promtail"
+ app: "promtail"
+ depends_on:
+ loki:
+ condition: service_healthy
+ healthcheck:
+ test: ["CMD-SHELL", "wget -q -O /dev/null http://localhost:9080/targets || exit 1"]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+ start_period: 10s
+ restart: unless-stopped
+ deploy:
+ resources:
+ limits:
+ cpus: '1.0'
+ memory: 1G
+ reservations:
+ cpus: '0.5'
+ memory: 512M
+
+ grafana:
+ image: grafana/grafana:12.3.1
+ container_name: grafana
+ ports:
+ - "3000:3000"
+ environment:
+ GF_AUTH_ANONYMOUS_ENABLED: "false"
+ GF_USERS_ALLOW_SIGN_UP: "false"
+ GF_SECURITY_ADMIN_USER: admin
+ GF_SECURITY_ADMIN_PASSWORD: ${GF_SECURITY_ADMIN_PASSWORD}
+ GF_SERVER_ROOT_URL: http://localhost:3000
+ volumes:
+ - grafana-data:/var/lib/grafana
+ - ./grafana/provisioning:/etc/grafana/provisioning:ro
+ networks:
+ - logging
+ labels:
+ logging: "promtail"
+ app: "grafana"
+ depends_on:
+ loki:
+ condition: service_healthy
+ healthcheck:
+ test: ["CMD-SHELL", "wget -q -O /dev/null http://localhost:3000/api/health || exit 1"]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+ start_period: 20s
+ restart: unless-stopped
+ deploy:
+ resources:
+ limits:
+ cpus: '1.0'
+ memory: 1G
+ reservations:
+ cpus: '0.5'
+ memory: 512M
+
+ app-python:
+ build:
+ context: ..
+ dockerfile: Dockerfile
+ image: devops-info-service:lab8
+ platform: linux/amd64
+ container_name: app-python
+ ports:
+ - "8000:5000"
+ volumes:
+ - ../app.py:/app/app.py:ro
+ networks:
+ - logging
+ labels:
+ logging: "promtail"
+ app: "devops-python"
+ depends_on:
+ loki:
+ condition: service_healthy
+ healthcheck:
+ test: ["CMD-SHELL", "curl -f http://localhost:5000/health || exit 1"]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+ start_period: 10s
+ restart: unless-stopped
+
+ prometheus:
+ image: prom/prometheus:v3.9.0
+ container_name: prometheus
+ command:
+ - '--config.file=/etc/prometheus/prometheus.yml'
+ - '--storage.tsdb.retention.time=15d'
+ - '--storage.tsdb.retention.size=10GB'
+ ports:
+ - "9090:9090"
+ volumes:
+ - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
+ - prometheus-data:/prometheus
+ networks:
+ - logging
+ labels:
+ logging: "promtail"
+ app: "prometheus"
+ healthcheck:
+ test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:9090/-/healthy || exit 1"]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+ start_period: 10s
+ restart: unless-stopped
+ deploy:
+ resources:
+ limits:
+ cpus: '1.0'
+ memory: 1G
+ reservations:
+ cpus: '0.5'
+ memory: 512M
+
+volumes:
+ loki-data:
+ grafana-data:
+ promtail-positions:
+ prometheus-data:
+
+networks:
+ logging:
+ driver: bridge
diff --git a/app_python/monitoring/grafana/provisioning/datasources/loki.yml b/app_python/monitoring/grafana/provisioning/datasources/loki.yml
new file mode 100644
index 0000000000..d64bee8027
--- /dev/null
+++ b/app_python/monitoring/grafana/provisioning/datasources/loki.yml
@@ -0,0 +1,10 @@
+apiVersion: 1
+
+datasources:
+ - name: Loki
+ type: loki
+ access: proxy
+ orgId: 1
+ url: http://loki:3100
+ isDefault: true
+ editable: true
diff --git a/app_python/monitoring/loki/config.yml b/app_python/monitoring/loki/config.yml
new file mode 100644
index 0000000000..b7d44d1ea5
--- /dev/null
+++ b/app_python/monitoring/loki/config.yml
@@ -0,0 +1,25 @@
+auth_enabled: false
+
+server:
+ http_listen_port: 3100
+
+common:
+ path_prefix: /loki
+ storage:
+ filesystem:
+ chunks_directory: /loki/chunks
+ rules_directory: /loki/rules
+ replication_factor: 1
+ ring:
+ kvstore:
+ store: inmemory
+
+schema_config:
+ configs:
+ - from: "2025-01-01"
+ store: tsdb
+ object_store: filesystem
+ schema: v13
+ index:
+ prefix: index_
+ period: 24h
diff --git a/app_python/monitoring/prometheus/prometheus.yml b/app_python/monitoring/prometheus/prometheus.yml
new file mode 100644
index 0000000000..6f4aab45bb
--- /dev/null
+++ b/app_python/monitoring/prometheus/prometheus.yml
@@ -0,0 +1,23 @@
+global:
+ scrape_interval: 15s
+ evaluation_interval: 15s
+
+scrape_configs:
+ - job_name: 'prometheus'
+ static_configs:
+ - targets: ['localhost:9090']
+
+ - job_name: 'loki'
+ static_configs:
+ - targets: ['loki:3100']
+ metrics_path: '/metrics'
+
+ - job_name: 'grafana'
+ static_configs:
+ - targets: ['grafana:3000']
+ metrics_path: '/metrics'
+
+ - job_name: 'app'
+ static_configs:
+ - targets: ['app-python:5000'] # note: 5000 is the port inside the container
+ metrics_path: '/metrics'
diff --git a/app_python/monitoring/promtail/config.yml b/app_python/monitoring/promtail/config.yml
new file mode 100644
index 0000000000..3a070bbda6
--- /dev/null
+++ b/app_python/monitoring/promtail/config.yml
@@ -0,0 +1,34 @@
+server:
+ http_listen_port: 9080
+ grpc_listen_port: 0
+
+positions:
+ filename: /positions/positions.yml
+
+clients:
+ - url: http://loki:3100/loki/api/v1/push
+
+scrape_configs:
+ - job_name: docker
+ docker_sd_configs:
+ - host: unix:///var/run/docker.sock
+ refresh_interval: 5s
+
+ relabel_configs:
+ - source_labels: [__meta_docker_container_label_logging]
+ regex: promtail
+ action: keep
+
+ - source_labels: [__meta_docker_container_name]
+ regex: "/(.*)"
+ target_label: container
+
+ - source_labels: [__meta_docker_container_label_app]
+ target_label: app
+
+ - target_label: job
+ replacement: docker
+
+ - source_labels: [__meta_docker_container_id]
+ target_label: __path__
+ replacement: /var/lib/docker/containers/$1/*-json.log
diff --git a/app_python/pulumi/.gitignore b/app_python/pulumi/.gitignore
new file mode 100644
index 0000000000..a3807e5bdb
--- /dev/null
+++ b/app_python/pulumi/.gitignore
@@ -0,0 +1,2 @@
+*.pyc
+venv/
diff --git a/app_python/pulumi/Pulumi.dev.yaml b/app_python/pulumi/Pulumi.dev.yaml
new file mode 100644
index 0000000000..a54d54385c
--- /dev/null
+++ b/app_python/pulumi/Pulumi.dev.yaml
@@ -0,0 +1,5 @@
+config:
+ aws:region: us-east-1
+ lab4-vm:key_name: vockey
+ lab4-vm:allowed_ssh_ip:
+ secure: AAABACtaxnsJyyQ6x52lUn6w86d34eLEu4K8zmL9dhfGHS/INLyTxTRmsc2EBUUY
diff --git a/app_python/pulumi/Pulumi.yaml b/app_python/pulumi/Pulumi.yaml
new file mode 100644
index 0000000000..248357fa3e
--- /dev/null
+++ b/app_python/pulumi/Pulumi.yaml
@@ -0,0 +1,11 @@
+name: lab4-vm
+description: Lab4 VM with Pulumi
+runtime:
+ name: python
+ options:
+ toolchain: pip
+ virtualenv: venv
+config:
+ pulumi:tags:
+ value:
+ pulumi:template: python
diff --git a/app_python/pulumi/__main__.py b/app_python/pulumi/__main__.py
new file mode 100644
index 0000000000..c04b59080c
--- /dev/null
+++ b/app_python/pulumi/__main__.py
@@ -0,0 +1,105 @@
+"""A Python Pulumi program"""
+
+import pulumi
+import pulumi_aws as aws
+
+config = pulumi.Config()
+region = config.get("region", "us-east-1")
+instance_type = config.get("instance_type", "t2.micro")
+key_name = config.require("key_name")
+allowed_ssh_ip = config.require("allowed_ssh_ip")
+
+ami = aws.ec2.get_ami(
+ most_recent=True,
+ owners=["099720109477"],
+ filters=[
+ {"name": "name", "values": ["ubuntu/images/hvm-ssd-gp3/ubuntu-noble-24.04-amd64-server-*"]},
+ {"name": "virtualization-type", "values": ["hvm"]},
+ ],
+)
+
+vpc = aws.ec2.Vpc("lab4-vpc",
+ cidr_block="10.0.0.0/16",
+ enable_dns_support=True,
+ enable_dns_hostnames=True,
+ tags={"Name": "lab4-vpc"}
+)
+
+subnet = aws.ec2.Subnet("lab4-subnet",
+ vpc_id=vpc.id,
+ cidr_block="10.0.1.0/24",
+ map_public_ip_on_launch=True,
+ availability_zone=f"{region}a",
+ tags={"Name": "lab4-subnet"}
+)
+
+igw = aws.ec2.InternetGateway("lab4-igw",
+ vpc_id=vpc.id,
+ tags={"Name": "lab4-igw"}
+)
+
+route_table = aws.ec2.RouteTable("lab4-rt",
+ vpc_id=vpc.id,
+ routes=[aws.ec2.RouteTableRouteArgs(
+ cidr_block="0.0.0.0/0",
+ gateway_id=igw.id,
+ )],
+ tags={"Name": "lab4-rt"}
+)
+
+route_table_assoc = aws.ec2.RouteTableAssociation("lab4-rta",
+ subnet_id=subnet.id,
+ route_table_id=route_table.id
+)
+
+sg = aws.ec2.SecurityGroup("lab4-sg",
+ name="lab4-sg",
+ description="Allow SSH, HTTP, port 5000",
+ vpc_id=vpc.id,
+ ingress=[
+ aws.ec2.SecurityGroupIngressArgs(
+ description="SSH from my IP",
+ from_port=22,
+ to_port=22,
+ protocol="tcp",
+ cidr_blocks=[allowed_ssh_ip],
+ ),
+ aws.ec2.SecurityGroupIngressArgs(
+ description="HTTP",
+ from_port=80,
+ to_port=80,
+ protocol="tcp",
+ cidr_blocks=["0.0.0.0/0"],
+ ),
+ aws.ec2.SecurityGroupIngressArgs(
+ description="Custom port 5000",
+ from_port=5000,
+ to_port=5000,
+ protocol="tcp",
+ cidr_blocks=["0.0.0.0/0"],
+ ),
+ ],
+ egress=[aws.ec2.SecurityGroupEgressArgs(
+ from_port=0,
+ to_port=0,
+ protocol="-1",
+ cidr_blocks=["0.0.0.0/0"],
+ )],
+ tags={"Name": "lab4-sg"}
+)
+
+instance = aws.ec2.Instance("lab4-vm",
+ instance_type=instance_type,
+ ami=ami.id,
+ key_name=key_name,
+ subnet_id=subnet.id,
+ vpc_security_group_ids=[sg.id],
+ root_block_device=aws.ec2.InstanceRootBlockDeviceArgs(
+ volume_size=8,
+ volume_type="gp3",
+ ),
+ tags={"Name": "lab4-vm"}
+)
+
+pulumi.export("public_ip", instance.public_ip)
+pulumi.export("ssh_command", instance.public_ip.apply(lambda ip: f"ssh -i ~/.ssh/labsuser.pem ubuntu@{ip}"))
diff --git a/app_python/pulumi/requirements.txt b/app_python/pulumi/requirements.txt
new file mode 100644
index 0000000000..bc4e43087b
--- /dev/null
+++ b/app_python/pulumi/requirements.txt
@@ -0,0 +1,2 @@
+pulumi>=3.0.0,<4.0.0
+pulumi-aws>=6.0.0,<7.0.0
diff --git a/app_python/requirements.txt b/app_python/requirements.txt
new file mode 100644
index 0000000000..cf9d9dac70
--- /dev/null
+++ b/app_python/requirements.txt
@@ -0,0 +1,4 @@
+Flask==3.1.0
+pytest==8.3.4
+python-json-logger==3.2.1
+prometheus-client==0.23.1
diff --git a/app_python/terraform/.gitignore b/app_python/terraform/.gitignore
new file mode 100644
index 0000000000..f26b5daa0d
--- /dev/null
+++ b/app_python/terraform/.gitignore
@@ -0,0 +1,9 @@
+# Terraform
+*.tfstate
+*.tfstate.*
+.terraform/
+terraform.tfvars
+*.tfvars
+# .terraform.lock.hcl intentionally NOT ignored: HashiCorp recommends committing the lock file
+# AWS credentials (if any)
+*.pem
\ No newline at end of file
diff --git a/app_python/terraform/main.tf b/app_python/terraform/main.tf
new file mode 100644
index 0000000000..514943c62a
--- /dev/null
+++ b/app_python/terraform/main.tf
@@ -0,0 +1,123 @@
+terraform {
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = "~> 5.0"
+ }
+ }
+}
+
+data "aws_ami" "ubuntu" {
+ most_recent = true
+ owners = ["099720109477"]
+
+ filter {
+ name = "name"
+ values = ["ubuntu/images/hvm-ssd-gp3/ubuntu-noble-24.04-amd64-server-*"]
+ }
+
+ filter {
+ name = "virtualization-type"
+ values = ["hvm"]
+ }
+}
+
+resource "aws_vpc" "main" {
+ cidr_block = "10.0.0.0/16"
+ enable_dns_support = true
+ enable_dns_hostnames = true
+ tags = {
+ Name = "lab4-vpc"
+ }
+}
+
+resource "aws_subnet" "public" {
+ vpc_id = aws_vpc.main.id
+ cidr_block = "10.0.1.0/24"
+ map_public_ip_on_launch = true
+ availability_zone = "${var.region}a"
+ tags = {
+ Name = "lab4-subnet"
+ }
+}
+
+resource "aws_internet_gateway" "igw" {
+ vpc_id = aws_vpc.main.id
+ tags = {
+ Name = "lab4-igw"
+ }
+}
+
+resource "aws_route_table" "public" {
+ vpc_id = aws_vpc.main.id
+ route {
+ cidr_block = "0.0.0.0/0"
+ gateway_id = aws_internet_gateway.igw.id
+ }
+ tags = {
+ Name = "lab4-rt"
+ }
+}
+
+resource "aws_route_table_association" "public" {
+ subnet_id = aws_subnet.public.id
+ route_table_id = aws_route_table.public.id
+}
+
+resource "aws_security_group" "lab4_sg" {
+ name = "lab4-sg"
+ description = "Allow SSH, HTTP, port 5000"
+ vpc_id = aws_vpc.main.id
+
+ ingress {
+ description = "SSH from my IP"
+ from_port = 22
+ to_port = 22
+ protocol = "tcp"
+ cidr_blocks = [var.allowed_ssh_ip]
+ }
+
+ ingress {
+ description = "HTTP"
+ from_port = 80
+ to_port = 80
+ protocol = "tcp"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+
+ ingress {
+ description = "Custom port 5000"
+ from_port = 5000
+ to_port = 5000
+ protocol = "tcp"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+
+ egress {
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+
+ tags = {
+ Name = "lab4-sg"
+ }
+}
+
+resource "aws_instance" "lab4_vm" {
+ ami = data.aws_ami.ubuntu.id
+ instance_type = var.instance_type
+ key_name = var.key_name
+ subnet_id = aws_subnet.public.id
+ vpc_security_group_ids = [aws_security_group.lab4_sg.id]
+
+ root_block_device {
+ volume_size = 8
+ volume_type = "gp3"
+ }
+
+ tags = {
+ Name = "lab4-vm"
+ }
+}
\ No newline at end of file
diff --git a/app_python/terraform/outputs.tf b/app_python/terraform/outputs.tf
new file mode 100644
index 0000000000..be8c218a85
--- /dev/null
+++ b/app_python/terraform/outputs.tf
@@ -0,0 +1,8 @@
+output "public_ip" {
+ description = "Public IP of the instance"
+ value = aws_instance.lab4_vm.public_ip
+}
+
+output "ssh_command" {
+ value = format("ssh -i ~/.ssh/labsuser.pem ubuntu@%s", aws_instance.lab4_vm.public_ip)
+}
\ No newline at end of file
diff --git a/app_python/terraform/variables.tf b/app_python/terraform/variables.tf
new file mode 100644
index 0000000000..54029ee0c0
--- /dev/null
+++ b/app_python/terraform/variables.tf
@@ -0,0 +1,21 @@
+variable "region" {
+ description = "AWS region"
+ type = string
+ default = "us-east-1"
+}
+
+variable "instance_type" {
+ description = "EC2 instance type"
+ type = string
+ default = "t2.micro"
+}
+
+variable "key_name" {
+ description = "SSH key pair name (vockey)"
+ type = string
+}
+
+variable "allowed_ssh_ip" {
+ description = "Your public IP with /32"
+ type = string
+}
\ No newline at end of file
diff --git a/app_python/tests/__init__.py b/app_python/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/app_python/tests/test_app.py b/app_python/tests/test_app.py
new file mode 100644
index 0000000000..4bc23a3020
--- /dev/null
+++ b/app_python/tests/test_app.py
@@ -0,0 +1,45 @@
+import pytest
+import sys
+import os
+sys.path.insert(0, os.path.abspath(os.path.dirname(__file__) + "/.."))
+from app import app
+
+@pytest.fixture
+def client():
+ app.config['TESTING'] = True
+ with app.test_client() as client:
+ yield client
+
+def test_main_endpoint_status(client):
+ """Test that main endpoint returns 200"""
+ rv = client.get('/')
+ assert rv.status_code == 200
+
+def test_main_endpoint_json_structure(client):
+ """Test that main endpoint contains all required fields"""
+ rv = client.get('/')
+ json_data = rv.get_json()
+ assert 'service' in json_data
+ assert 'system' in json_data
+ assert 'runtime' in json_data
+ assert 'request' in json_data
+ assert 'endpoints' in json_data
+ assert json_data['service']['name'] == 'devops-info-service'
+ assert json_data['service']['framework'] == 'Flask'
+
+def test_health_endpoint(client):
+ """Test /health returns healthy status"""
+ rv = client.get('/health')
+ assert rv.status_code == 200
+ json_data = rv.get_json()
+ assert json_data['status'] == 'healthy'
+ assert 'timestamp' in json_data
+ assert 'uptime_seconds' in json_data
+
+def test_404_error(client):
+ """Test non-existent endpoint returns 404 JSON"""
+ rv = client.get('/nonexistent')
+ assert rv.status_code == 404
+ json_data = rv.get_json()
+ assert 'error' in json_data
+ assert json_data['error'] == 'Not Found'