Compare commits

..

143 Commits

Author SHA1 Message Date
a5fc28111b Merge branch 'master' of https://git.dvirlabs.com/dvirlabs/my-apps 2026-03-01 05:09:41 +02:00
7d0dc5fc37 Update Whatsapp temp access token to parament token 2026-03-01 05:06:19 +02:00
woodpecker-bot
763075096a backend: update tag to master-71b6828 2026-03-01 01:40:21 +00:00
woodpecker-bot
c248d5f572 frontend: update tag to master-71b6828 2026-03-01 01:40:08 +00:00
woodpecker-bot
902df179ee backend: update tag to master-c50544d 2026-03-01 01:20:29 +00:00
8e96150165 Update Whatsapp credentials 2026-03-01 02:51:10 +02:00
de9710c23d Update Whatsapp credentials 2026-03-01 02:49:43 +02:00
woodpecker-bot
8768a5fb69 backend: update tag to master-d338722 2026-03-01 00:33:08 +00:00
woodpecker-bot
362b1e8737 frontend: update tag to master-d338722 2026-03-01 00:32:45 +00:00
6718b50efb Merge branch 'master' of https://git.dvirlabs.com/dvirlabs/my-apps 2026-03-01 02:19:30 +02:00
c0ec6b635e Update google client for invy: 2026-03-01 02:19:11 +02:00
woodpecker-bot
e9ebf79a0f frontend: update tag to master-e0169b8 2026-02-28 23:38:42 +00:00
woodpecker-bot
149c630588 backend: update tag to master-d4270ea 2026-02-28 23:17:33 +00:00
woodpecker-bot
4308113626 frontend: update tag to master-d4270ea 2026-02-28 23:17:16 +00:00
3b55c9171a fix: revert to last known working image tags (backend: b2d800a, frontend: 4e0ae2e) 2026-02-22 15:34:43 +02:00
fa9956419b Merge: resolve conflict keeping imagePullSecrets and correct image tags 2026-02-22 15:34:10 +02:00
5499a3dc10 fix: add imagePullSecrets field 2026-02-22 15:33:50 +02:00
woodpecker-bot
3ee911535a backend: update tag to master-cf7f3ee 2026-02-22 13:18:01 +00:00
woodpecker-bot
a558d326d8 frontend: update tag to master-cf7f3ee 2026-02-22 13:17:33 +00:00
e733d9e558 fix: add serviceAccount config and match postgres to deployed StatefulSet 2026-02-22 15:15:23 +02:00
4c58d13f00 Update tasko 2026-02-22 15:14:10 +02:00
5644e2280b Merge branch 'master' of https://git.dvirlabs.com/dvirlabs/my-apps 2026-02-20 16:01:26 +02:00
45c772c24f Add calink url 2026-02-20 16:01:18 +02:00
woodpecker-bot
9a9b5c5485 backend: update tag to master-e6bd63c 2026-02-20 13:57:08 +00:00
woodpecker-bot
20338c47b0 frontend: update tag to master-e6bd63c 2026-02-20 13:56:59 +00:00
c7a4b55b25 Fix NOTES.txt 2026-02-20 15:53:25 +02:00
ba035ffc09 Add calink app 2026-02-20 15:49:20 +02:00
9202891edf Update my-recipes backend tag 2026-02-05 06:17:57 +02:00
woodpecker-bot
9e6be26651 backend: update tag to master-5051042 2026-02-03 14:49:31 +00:00
woodpecker-bot
77148d35a2 frontend: update tag to master-5051042 2026-02-03 14:49:21 +00:00
woodpecker-bot
f37c0065f3 backend: update tag to master-ed0aedc 2026-02-03 12:49:12 +00:00
woodpecker-bot
b47e0c73d7 frontend: update tag to master-ed0aedc 2026-02-03 12:49:01 +00:00
woodpecker-bot
7d2c19f7c4 frontend: update tag to master-2144b4c 2026-02-02 19:05:27 +00:00
woodpecker-bot
e0b43193f0 frontend: update tag to master-6496b13 2026-02-02 18:47:36 +00:00
55d021c0bb Add admin and password values 2026-02-02 20:45:27 +02:00
93560892f9 Fix DB pod 2026-02-02 20:05:26 +02:00
5d1b32d7c6 Fix DB pod 2026-02-02 19:54:51 +02:00
2c8dc8de71 Fix DB pod 2026-02-02 19:37:27 +02:00
238920ddf3 Fix DB pod 2026-02-02 19:28:58 +02:00
b22b692d61 Add init container to DB 2026-02-02 19:23:02 +02:00
woodpecker-bot
4cc3dabc76 backend: update tag to aws-20491c8 2026-01-26 03:32:08 +00:00
woodpecker-bot
8dde51cc65 backend: update tag to develop-82855cf 2026-01-26 02:41:40 +00:00
woodpecker-bot
5ce7c145b4 backend: update tag to master-0337f70 2026-01-05 11:39:58 +00:00
dvirlabs
8215eaba95 Update navix data path 2026-01-05 12:45:58 +02:00
dvirlabs
e32de28e42 Update storageclass for navix 2026-01-05 12:40:19 +02:00
dvirlabs
631de40737 Add pvc to navix 2026-01-05 12:38:32 +02:00
woodpecker-bot
bfd31d9d4c backend: update tag to develop-e659b82 2026-01-02 13:57:50 +00:00
dvirlabs
7af9c4c214 Fix invy db 2025-12-31 11:44:56 +02:00
woodpecker-bot
33d3a77d28 backend: update tag to master-af515c5 2025-12-31 00:41:11 +00:00
61522f7473 Update scehma sql of invy 2025-12-31 02:33:21 +02:00
ff7ab8b7d0 Update scehma sql of invy 2025-12-31 02:32:24 +02:00
87b8031125 Update scehma sql of invy 2025-12-31 02:25:39 +02:00
a4dbe26d83 Merge branch 'master' of https://git.dvirlabs.com/dvirlabs/my-apps 2025-12-31 02:19:32 +02:00
5ff51b67bf Try to fix google auth 2025-12-31 02:19:23 +02:00
woodpecker-bot
f0dc531919 backend: update tag to master-40f264a 2025-12-31 00:07:37 +00:00
woodpecker-bot
571c9c83f2 frontend: update tag to master-2c5668d 2025-12-31 00:01:31 +00:00
woodpecker-bot
60d17e487c frontend: update tag to master-0c33068 2025-12-30 23:29:52 +00:00
woodpecker-bot
c44cd39219 frontend: update tag to master-1d320f7 2025-12-30 23:11:19 +00:00
c2dd6727d5 Set sa for invy 2025-12-31 00:56:19 +02:00
76c22c5387 Remove imagePullSecret from invy 2025-12-31 00:50:09 +02:00
add56684d9 Try pull latest 2025-12-29 21:02:56 +02:00
woodpecker-bot
41270e8168 backend: update tag to master-ef79895 2025-12-29 18:50:49 +00:00
woodpecker-bot
b6ffdd18e1 frontend: update tag to master-ef79895 2025-12-29 18:50:34 +00:00
a60b4cdafc Update invy valus 2025-12-29 20:49:59 +02:00
05c81f13ce Update invy chart 2025-12-29 20:49:39 +02:00
a736c01260 Update invy app name 2025-12-29 20:45:07 +02:00
3306c7c680 Add invy app 2025-12-29 20:43:56 +02:00
dvirlabs
fe3dd1e208 Update api url of ipify 2025-12-23 21:37:33 +02:00
dvirlabs
0f7affda8d Merge branch 'master' of https://git.dvirlabs.com/dvirlabs/my-apps 2025-12-23 21:33:00 +02:00
dvirlabs
f831e3ec93 Update api url of ipify 2025-12-23 21:32:53 +02:00
woodpecker-bot
f585de2541 frontend: update tag to master-67d217c 2025-12-23 19:29:10 +00:00
dvirlabs
eb299cf1e8 Update ipify chart 2025-12-23 21:27:19 +02:00
dvirlabs
949894fe7f Fix registry for ipify 2025-12-23 21:18:39 +02:00
dvirlabs
a43cca4342 Update namespace for ipify 2025-12-23 21:16:16 +02:00
woodpecker-bot
17039b4e1a backend: update tag to master-894b429 2025-12-23 19:12:24 +00:00
woodpecker-bot
65861479d0 frontend: update tag to master-894b429 2025-12-23 19:12:11 +00:00
woodpecker-bot
465650ac8e backend: update tag to master-3812f09 2025-12-23 19:09:31 +00:00
woodpecker-bot
0d29d152ac frontend: update tag to master-3812f09 2025-12-23 19:09:22 +00:00
dvirlabs
c8ad88b31b ipify 2025-12-23 21:04:21 +02:00
woodpecker-bot
3fce551850 backend: update tag to develop-578fa18 2025-12-21 02:57:01 +00:00
woodpecker-bot
d9783fa019 backend: update tag to develop-538f2ce 2025-12-21 02:43:51 +00:00
7bb08e35e8 Add cred for cloudflared r2 2025-12-21 04:41:00 +02:00
woodpecker-bot
b7d1b8cc02 frontend: update tag to develop-d36f4bc 2025-12-21 02:29:08 +00:00
woodpecker-bot
58a9e00a2f backend: update tag to develop-7fd437e 2025-12-21 02:22:13 +00:00
woodpecker-bot
6df8995027 frontend: update tag to develop-7fd437e 2025-12-21 02:22:05 +00:00
woodpecker-bot
8578ec9115 backend: update tag to develop-41f4a31 2025-12-21 01:48:12 +00:00
woodpecker-bot
6a2d5ef3a5 frontend: update tag to develop-41f4a31 2025-12-21 01:48:04 +00:00
woodpecker-bot
de435c89b4 backend: update tag to develop-d159cad 2025-12-21 00:49:37 +00:00
woodpecker-bot
8c98deee90 frontend: update tag to develop-d159cad 2025-12-21 00:49:28 +00:00
woodpecker-bot
407dce71ab frontend: update tag to develop-c65cce9 2025-12-20 21:27:51 +00:00
woodpecker-bot
07aaa4ce4b frontend: update tag to develop-013d569 2025-12-20 21:03:57 +00:00
woodpecker-bot
ca4bc2e53e frontend: update tag to develop-9f781d7 2025-12-20 20:53:27 +00:00
woodpecker-bot
e00d5928c9 frontend: update tag to develop-f2674c3 2025-12-19 14:27:14 +00:00
woodpecker-bot
729ebbb652 backend: update tag to develop-653e4f0 2025-12-19 14:21:54 +00:00
woodpecker-bot
1bcf7f1737 frontend: update tag to develop-653e4f0 2025-12-19 14:21:47 +00:00
78a867814c Update schema sql 2025-12-19 15:27:07 +02:00
woodpecker-bot
aefd555581 backend: update tag to develop-3270788 2025-12-19 02:16:19 +00:00
woodpecker-bot
05230d830f frontend: update tag to develop-3270788 2025-12-19 02:16:05 +00:00
woodpecker-bot
00380843c8 backend: update tag to master-7a34f5f 2025-12-18 22:02:55 +00:00
woodpecker-bot
df7bd62d29 frontend: update tag to master-7a34f5f 2025-12-18 22:02:46 +00:00
ce7ea192fe Update job migration 2025-12-17 18:31:58 +02:00
b3db63a778 Update job migration 2025-12-17 18:24:50 +02:00
5a3dfc8597 Add job migration 2025-12-17 18:06:42 +02:00
7f0b0c7a3b Add job migration 2025-12-17 17:58:12 +02:00
36e6ab76d7 Fix db schema 2025-12-17 17:21:23 +02:00
cf19c6f2e2 Fix db schema 2025-12-17 17:05:30 +02:00
436832a419 Fix db schema 2025-12-17 17:02:42 +02:00
4b4bf0e57f Fix db schema 2025-12-17 16:58:10 +02:00
72f79df2c8 Update schema.sql 2025-12-17 16:47:39 +02:00
4cb449fdd0 Fix db schema 2025-12-17 16:32:30 +02:00
woodpecker-bot
47b757ec21 backend: update tag to master-a1e6c12 2025-12-17 14:27:17 +00:00
c8c7058837 fix template 2025-12-17 16:13:33 +02:00
woodpecker-bot
a074f23152 frontend: update tag to master-7c2a387 2025-12-17 14:10:14 +00:00
17f5a900d5 fix template 2025-12-17 16:07:25 +02:00
woodpecker-bot
35d3db7400 frontend: update tag to master-a798d2f 2025-12-17 13:54:18 +00:00
230c9fc804 try to fix frontend 2025-12-17 08:03:04 +02:00
32cf8cc602 set the backend to call /health instead /api/health 2025-12-17 07:59:52 +02:00
woodpecker-bot
f67a74c160 backend: update tag to master-557535e 2025-12-17 05:52:18 +00:00
c9a46e8135 Try to fix backend pod 2025-12-17 07:36:41 +02:00
58964c69dc Fix tag for dateme 2025-12-17 07:31:58 +02:00
7aa72023ba Fix url for dateme 2025-12-17 07:21:40 +02:00
e13c6bac3c Fix templates for dateme app 2025-12-17 07:19:28 +02:00
f5cb52c57a Fix values for dateme 2025-12-17 07:15:06 +02:00
woodpecker-bot
48322da811 backend: update tag to master-ddeee8a 2025-12-17 05:06:12 +00:00
woodpecker-bot
42c41ed6b6 frontend: update tag to master-ddeee8a 2025-12-17 05:06:04 +00:00
3d9bcb4ab4 Fix name for argo app dateme 2025-12-17 06:35:58 +02:00
946beadf2f Merge branch 'master' of https://git.dvirlabs.com/dvirlabs/my-apps 2025-12-17 06:34:44 +02:00
a7659198a4 Add dateme app 2025-12-17 06:32:03 +02:00
woodpecker-bot
7a23286c16 frontend: update tag to develop-70f8ce1 2025-12-17 03:46:18 +00:00
woodpecker-bot
4aef8059ff frontend: update tag to develop-1d04352 2025-12-16 19:53:31 +00:00
woodpecker-bot
811cb92d3e frontend: update tag to develop-2fcbcaa 2025-12-16 19:41:47 +00:00
d43480302c Add app-secret 2025-12-14 14:43:02 +02:00
ce187dbc28 Add google auth to my-recipes 2025-12-14 14:37:29 +02:00
woodpecker-bot
0e3769b2c9 backend: update tag to develop-a780534 2025-12-14 12:18:56 +00:00
woodpecker-bot
9997bd36ca backend: update tag to develop-ae9349c 2025-12-14 12:10:48 +00:00
woodpecker-bot
d35f1a6f7b backend: update tag to develop-0afe014 2025-12-14 12:04:24 +00:00
woodpecker-bot
21d1485dca backend: update tag to develop-1da5dc0 2025-12-14 11:46:49 +00:00
woodpecker-bot
e77fc650d5 frontend: update tag to develop-1da5dc0 2025-12-14 11:46:43 +00:00
dvirlabs
8f23ba466c Update schema.sql for my-recipes using db-migration job 2025-12-14 06:50:12 +02:00
dvirlabs
4ec34a920c Update schema.sql for my-recipes using db-migration job 2025-12-14 06:42:55 +02:00
dvirlabs
3b15565a9d Update schema.sql for my-recipes 2025-12-14 06:37:27 +02:00
woodpecker-bot
f036e6e2ce backend: update tag to develop-ba7d0c9 2025-12-14 04:08:24 +00:00
woodpecker-bot
96dc223290 frontend: update tag to develop-ba7d0c9 2025-12-14 04:08:14 +00:00
94 changed files with 4726 additions and 17 deletions

21
argocd-apps/calink.yaml Normal file
View File

@ -0,0 +1,21 @@
# Argo CD Application: deploys the calink Helm chart from this repo,
# pulling values from manifests/calink/values.yaml.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: calink
  namespace: argocd
spec:
  project: my-apps
  source:
    repoURL: https://git.dvirlabs.com/dvirlabs/my-apps.git
    targetRevision: HEAD
    path: charts/calink-chart
    helm:
      valueFiles:
        # Path is relative to source.path (charts/calink-chart).
        - ../../manifests/calink/values.yaml
  destination:
    server: https://kubernetes.default.svc
    namespace: my-apps
  syncPolicy:
    automated:
      prune: true     # delete resources removed from git
      selfHeal: true  # revert out-of-band cluster changes

21
argocd-apps/dateme.yaml Normal file
View File

@ -0,0 +1,21 @@
# Argo CD Application: deploys the dateme Helm chart from this repo,
# pulling values from manifests/dateme/values.yaml.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: dateme
  namespace: argocd
spec:
  project: my-apps
  source:
    repoURL: https://git.dvirlabs.com/dvirlabs/my-apps.git
    targetRevision: HEAD
    path: charts/dateme-chart
    helm:
      valueFiles:
        # Path is relative to source.path (charts/dateme-chart).
        - ../../manifests/dateme/values.yaml
  destination:
    server: https://kubernetes.default.svc
    namespace: my-apps
  syncPolicy:
    automated:
      prune: true     # delete resources removed from git
      selfHeal: true  # revert out-of-band cluster changes

21
argocd-apps/invy.yaml Normal file
View File

@ -0,0 +1,21 @@
# Argo CD Application: deploys the invy Helm chart from this repo,
# pulling values from manifests/invy/values.yaml.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: invy
  namespace: argocd
spec:
  project: my-apps
  source:
    repoURL: https://git.dvirlabs.com/dvirlabs/my-apps.git
    targetRevision: HEAD
    path: charts/invy-chart
    helm:
      valueFiles:
        # Path is relative to source.path (charts/invy-chart).
        - ../../manifests/invy/values.yaml
  destination:
    server: https://kubernetes.default.svc
    namespace: my-apps
  syncPolicy:
    automated:
      prune: true     # delete resources removed from git
      selfHeal: true  # revert out-of-band cluster changes

21
argocd-apps/ipify.yaml Normal file
View File

@ -0,0 +1,21 @@
# Argo CD Application: deploys the ipify Helm chart from this repo,
# pulling values from manifests/ipify/values.yaml.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: ipify
  namespace: argocd
spec:
  project: my-apps
  source:
    repoURL: https://git.dvirlabs.com/dvirlabs/my-apps.git
    targetRevision: HEAD
    path: charts/ipify-chart
    helm:
      valueFiles:
        # Path is relative to source.path (charts/ipify-chart).
        - ../../manifests/ipify/values.yaml
  destination:
    server: https://kubernetes.default.svc
    namespace: my-apps
  syncPolicy:
    automated:
      prune: true     # delete resources removed from git
      selfHeal: true  # revert out-of-band cluster changes

View File

@ -0,0 +1,6 @@
# Helm chart metadata for the calink application (frontend + backend).
apiVersion: v2
name: calink
description: Calink calendar event generator
type: application
# version = chart packaging version; appVersion = deployed app version.
version: 1.0.0
appVersion: "1.0.0"

View File

@ -0,0 +1,43 @@
{{/*
NOTES.txt — post-install message printed by `helm install` / `helm status`.
When either ingress is enabled it lists the external URLs (http vs https
chosen by whether TLS is configured); otherwise it prints port-forward
instructions for the frontend and backend services.
*/}}
Thank you for installing {{ .Chart.Name }}!
Your release is named {{ .Release.Name }}.
To learn more about the release, try:
$ helm status {{ .Release.Name }}
$ helm get all {{ .Release.Name }}
{{- if or .Values.frontend.ingress.enabled .Values.backend.ingress.enabled }}
Calink is accessible at:
{{- if .Values.frontend.ingress.enabled }}
Frontend:
{{- range .Values.frontend.ingress.hosts }}
http{{ if $.Values.frontend.ingress.tls }}s{{ end }}://{{ .host }}
{{- end }}
{{- end }}
{{- if .Values.backend.ingress.enabled }}
Backend API:
{{- range .Values.backend.ingress.hosts }}
http{{ if $.Values.backend.ingress.tls }}s{{ end }}://{{ .host }}
{{- end }}
{{- end }}
{{- else }}
To access Calink, forward the ports:
kubectl port-forward svc/{{ include "calink.fullname" . }}-frontend 8080:80
kubectl port-forward svc/{{ include "calink.fullname" . }}-backend 8000:8000
Then visit:
Frontend: http://localhost:8080
Backend: http://localhost:8000
{{- end }}
Backend API documentation is available at:
/docs
Health check endpoint:
/health

View File

@ -0,0 +1,84 @@
{{/*
Expand the name of the chart.
Truncated to 63 chars because Kubernetes name labels are limited to 63
characters (DNS-1123); trailing "-" stripped so the result stays valid.
*/}}
{{- define "calink.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
Uses fullnameOverride verbatim when set; otherwise "<release>-<name>",
collapsing to just the release name when it already contains the chart
name (avoids "calink-calink"). Always truncated to the 63-char limit.
*/}}
{{- define "calink.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
"+" is not allowed in label values, so it is replaced with "_".
*/}}
{{- define "calink.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels applied to every resource: chart id, selector labels,
app version, manager, plus any user-supplied commonLabels.
*/}}
{{- define "calink.labels" -}}
helm.sh/chart: {{ include "calink.chart" . }}
{{ include "calink.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.commonLabels }}
{{ toYaml . }}
{{- end }}
{{- end }}
{{/*
Selector labels — the immutable subset used in Deployment selectors and
Service selectors. Keep this stable across upgrades.
*/}}
{{- define "calink.selectorLabels" -}}
app.kubernetes.io/name: {{ include "calink.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Backend labels
*/}}
{{- define "calink.backend.labels" -}}
{{ include "calink.labels" . }}
app.kubernetes.io/component: backend
{{- end }}
{{/*
Backend selector labels
*/}}
{{- define "calink.backend.selectorLabels" -}}
{{ include "calink.selectorLabels" . }}
app.kubernetes.io/component: backend
{{- end }}
{{/*
Frontend labels
*/}}
{{- define "calink.frontend.labels" -}}
{{ include "calink.labels" . }}
app.kubernetes.io/component: frontend
{{- end }}
{{/*
Frontend selector labels
*/}}
{{- define "calink.frontend.selectorLabels" -}}
{{ include "calink.selectorLabels" . }}
app.kubernetes.io/component: frontend
{{- end }}

View File

@ -0,0 +1,59 @@
{{/*
Deployment for the calink backend API server.
Image, replica count, probes and resources come from .Values.backend;
when persistence is enabled the backend-data PVC is mounted at
.Values.backend.persistence.mountPath.
Fix: `env:` is now guarded with `with` — previously an empty
.Values.backend.env rendered `env: null`, which fails API validation.
*/}}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "calink.fullname" . }}-backend
  labels:
    {{- include "calink.backend.labels" . | nindent 4 }}
  {{- with .Values.commonAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  replicas: {{ .Values.backend.replicas }}
  selector:
    matchLabels:
      {{- include "calink.backend.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "calink.backend.selectorLabels" . | nindent 8 }}
      {{- with .Values.commonAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
    spec:
      containers:
        - name: backend
          image: "{{ .Values.backend.image.repository }}:{{ .Values.backend.image.tag }}"
          imagePullPolicy: {{ .Values.backend.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 8000
              protocol: TCP
          {{- with .Values.backend.env }}
          env:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          {{- if .Values.backend.persistence.enabled }}
          volumeMounts:
            - name: data
              mountPath: {{ .Values.backend.persistence.mountPath }}
          {{- end }}
          livenessProbe:
            httpGet:
              path: {{ .Values.backend.healthCheck.path }}
              port: http
            initialDelaySeconds: {{ .Values.backend.healthCheck.initialDelaySeconds }}
            periodSeconds: {{ .Values.backend.healthCheck.periodSeconds }}
          readinessProbe:
            httpGet:
              path: {{ .Values.backend.healthCheck.path }}
              port: http
            initialDelaySeconds: {{ .Values.backend.healthCheck.initialDelaySeconds }}
            periodSeconds: {{ .Values.backend.healthCheck.periodSeconds }}
          resources:
            {{- toYaml .Values.backend.resources | nindent 12 }}
      {{- if .Values.backend.persistence.enabled }}
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: {{ include "calink.fullname" . }}-backend-data
      {{- end }}

View File

@ -0,0 +1,46 @@
{{/*
Deployment for the calink frontend (static web server on port 80).
Replicas, image, probes and resources are driven by .Values.frontend.
*/}}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "calink.fullname" . }}-frontend
  labels:
    {{- include "calink.frontend.labels" . | nindent 4 }}
  {{- with .Values.commonAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  replicas: {{ .Values.frontend.replicas }}
  selector:
    matchLabels:
      {{- include "calink.frontend.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "calink.frontend.selectorLabels" . | nindent 8 }}
      {{- with .Values.commonAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
    spec:
      containers:
        - name: frontend
          image: "{{ .Values.frontend.image.repository }}:{{ .Values.frontend.image.tag }}"
          imagePullPolicy: {{ .Values.frontend.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
          livenessProbe:
            httpGet:
              path: {{ .Values.frontend.healthCheck.path }}
              port: http
            initialDelaySeconds: {{ .Values.frontend.healthCheck.initialDelaySeconds }}
            periodSeconds: {{ .Values.frontend.healthCheck.periodSeconds }}
          readinessProbe:
            httpGet:
              path: {{ .Values.frontend.healthCheck.path }}
              port: http
            initialDelaySeconds: {{ .Values.frontend.healthCheck.initialDelaySeconds }}
            periodSeconds: {{ .Values.frontend.healthCheck.periodSeconds }}
          resources:
            {{- toYaml .Values.frontend.resources | nindent 12 }}

View File

@ -0,0 +1,93 @@
{{/*
Ingress resources for the calink frontend and backend, each gated on its
own .enabled flag.
Fixes:
- The `---` document separator is now emitted inside the backend guard,
  so disabling either ingress no longer leaves a stray separator / empty
  document in the rendered output.
- commonAnnotations are now rendered *under* an `annotations:` key.
  Previously, when the per-ingress annotations map was empty but
  commonAnnotations was set, the entries landed directly under metadata
  and produced an invalid manifest.
*/}}
{{- if .Values.frontend.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ include "calink.fullname" . }}-frontend
  labels:
    {{- include "calink.labels" . | nindent 4 }}
    component: frontend
  {{- if or .Values.frontend.ingress.annotations .Values.commonAnnotations }}
  annotations:
    {{- with .Values.frontend.ingress.annotations }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
    {{- with .Values.commonAnnotations }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  {{- end }}
spec:
  {{- if .Values.frontend.ingress.className }}
  ingressClassName: {{ .Values.frontend.ingress.className }}
  {{- end }}
  rules:
    {{- range .Values.frontend.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            pathType: {{ .pathType }}
            backend:
              service:
                name: {{ include "calink.fullname" $ }}-frontend
                port:
                  number: {{ $.Values.frontend.service.port }}
          {{- end }}
    {{- end }}
  {{- if .Values.frontend.ingress.tls }}
  tls:
    {{- range .Values.frontend.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
{{- end }}
{{- if .Values.backend.ingress.enabled }}
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ include "calink.fullname" . }}-backend
  labels:
    {{- include "calink.labels" . | nindent 4 }}
    component: backend
  {{- if or .Values.backend.ingress.annotations .Values.commonAnnotations }}
  annotations:
    {{- with .Values.backend.ingress.annotations }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
    {{- with .Values.commonAnnotations }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  {{- end }}
spec:
  {{- if .Values.backend.ingress.className }}
  ingressClassName: {{ .Values.backend.ingress.className }}
  {{- end }}
  rules:
    {{- range .Values.backend.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            pathType: {{ .pathType }}
            backend:
              service:
                name: {{ include "calink.fullname" $ }}-backend
                port:
                  number: {{ $.Values.backend.service.port }}
          {{- end }}
    {{- end }}
  {{- if .Values.backend.ingress.tls }}
  tls:
    {{- range .Values.backend.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
{{- end }}

View File

@ -0,0 +1,21 @@
{{/*
PersistentVolumeClaim for backend data (SQLite file), rendered only when
.Values.backend.persistence.enabled is true. An empty storageClass falls
back to the cluster default provisioner.
*/}}
{{- if .Values.backend.persistence.enabled }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ include "calink.fullname" . }}-backend-data
  labels:
    {{- include "calink.backend.labels" . | nindent 4 }}
  {{- with .Values.commonAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  accessModes:
    - ReadWriteOnce
  {{- if .Values.backend.persistence.storageClass }}
  storageClassName: {{ .Values.backend.persistence.storageClass }}
  {{- end }}
  resources:
    requests:
      storage: {{ .Values.backend.persistence.size }}
{{- end }}

View File

@ -0,0 +1,19 @@
{{/*
ClusterIP (by default) Service fronting the backend pods; forwards the
configured service port to the container's named "http" port.
*/}}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "calink.fullname" . }}-backend
  labels:
    {{- include "calink.backend.labels" . | nindent 4 }}
  {{- with .Values.commonAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  type: {{ .Values.backend.service.type }}
  ports:
    - port: {{ .Values.backend.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "calink.backend.selectorLabels" . | nindent 4 }}

View File

@ -0,0 +1,19 @@
{{/*
ClusterIP (by default) Service fronting the frontend pods; forwards the
configured service port to the container's named "http" port.
*/}}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "calink.fullname" . }}-frontend
  labels:
    {{- include "calink.frontend.labels" . | nindent 4 }}
  {{- with .Values.commonAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  type: {{ .Values.frontend.service.type }}
  ports:
    - port: {{ .Values.frontend.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "calink.frontend.selectorLabels" . | nindent 4 }}

View File

@ -0,0 +1,99 @@
# Default values for the calink chart.
nameOverride: ""
fullnameOverride: ""

# Extra labels/annotations merged into every rendered resource.
commonLabels: {}
commonAnnotations: {}

backend:
  image:
    repository: calink-backend
    tag: latest
    pullPolicy: IfNotPresent
  replicas: 1
  resources:
    limits:
      cpu: 500m
      memory: 512Mi
    requests:
      cpu: 250m
      memory: 256Mi
  env:
    - name: DATABASE_PATH
      value: "/data/app.db"
  persistence:
    enabled: true
    storageClass: ""  # empty => cluster default StorageClass
    size: 1Gi
    mountPath: /data
  service:
    type: ClusterIP
    port: 8000
  healthCheck:
    path: /health
    initialDelaySeconds: 10
    periodSeconds: 30
  ingress:
    enabled: true
    className: ""
    annotations: {}
      # traefik.ingress.kubernetes.io/router.entrypoints: websecure
      # traefik.ingress.kubernetes.io/router.tls: "true"
      # cert-manager.io/cluster-issuer: letsencrypt-prod
    hosts:
      - host: api-calink.example.com
        paths:
          - path: /
            pathType: Prefix
    tls: []
      # - secretName: api-calink-tls
      #   hosts:
      #     - api-calink.example.com

frontend:
  image:
    repository: calink-frontend
    tag: latest
    pullPolicy: IfNotPresent
  replicas: 1
  resources:
    limits:
      cpu: 200m
      memory: 256Mi
    requests:
      cpu: 100m
      memory: 128Mi
  service:
    type: ClusterIP
    port: 80
  healthCheck:
    path: /
    initialDelaySeconds: 5
    periodSeconds: 30
  ingress:
    enabled: true
    className: ""
    annotations: {}
      # traefik.ingress.kubernetes.io/router.entrypoints: websecure
      # traefik.ingress.kubernetes.io/router.tls: "true"
      # cert-manager.io/cluster-issuer: letsencrypt-prod
    hosts:
      - host: calink.example.com
        paths:
          - path: /
            pathType: Prefix
    tls: []
      # - secretName: calink-tls
      #   hosts:
      #     - calink.example.com

View File

@ -0,0 +1,12 @@
# Helm chart metadata for the MVP dating application.
apiVersion: v2
name: dating-app
description: MVP dating app Helm chart for Kubernetes deployment
type: application
# version = chart packaging version; appVersion = deployed app version.
version: 1.0.0
appVersion: "1.0.0"
keywords:
- dating
- social
- chat
maintainers:
- name: DevOps Team

View File

@ -0,0 +1,201 @@
# Helm Chart README
## Dating App Helm Chart
This Helm chart deploys the MVP dating application to Kubernetes with all necessary components.
### Prerequisites
- Kubernetes 1.19+
- Helm 3.0+
- Nginx Ingress Controller (for ingress)
- Storage provisioner (for PVC)
### Installation
#### Basic Installation (Development)
```bash
# Install with default values
helm install dating-app ./helm/dating-app -n dating-app --create-namespace
```
#### Production Installation with Custom Values
```bash
# Create custom values file
cp helm/dating-app/values.yaml my-values.yaml
# Edit my-values.yaml with your configuration
# Then install
helm install dating-app ./helm/dating-app -n dating-app --create-namespace -f my-values.yaml
```
### Configuration
Edit `values.yaml` to customize:
#### Ingress Hosts
```yaml
backend:
ingress:
host: api.yourdomain.com
frontend:
ingress:
host: app.yourdomain.com
```
#### Database
```yaml
postgres:
credentials:
username: your_user
password: your_password
database: your_db
```
#### Backend Environment
```yaml
backend:
environment:
JWT_SECRET: your-secret-key
CORS_ORIGINS: "https://app.yourdomain.com"
```
#### Frontend API URL
```yaml
frontend:
environment:
VITE_API_URL: "https://api.yourdomain.com"
```
#### Storage Classes
For cloud deployments (AWS, GCP, etc.), specify storage class:
```yaml
backend:
persistence:
storageClass: ebs-sc # AWS EBS
size: 10Gi
postgres:
persistence:
storageClass: ebs-sc
size: 20Gi
```
#### Replicas and Resources
```yaml
backend:
replicas: 3
resources:
requests:
memory: "512Mi"
cpu: "200m"
limits:
memory: "1Gi"
cpu: "500m"
frontend:
replicas: 2
resources:
requests:
memory: "256Mi"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "200m"
```
### Upgrading
```bash
helm upgrade dating-app ./helm/dating-app -f my-values.yaml
```
### Uninstalling
```bash
helm uninstall dating-app -n dating-app
```
### AWS Migration
To deploy to AWS:
1. **RDS for PostgreSQL**: Disable postgres in chart
```yaml
postgres:
enabled: false
```
2. **Update database URL** to RDS endpoint
```yaml
backend:
environment:
DATABASE_URL: "postgresql://user:password@your-rds-endpoint:5432/dating_app"
```
3. **S3 for Media Storage**: Update backend environment
```yaml
backend:
environment:
MEDIA_STORAGE: s3
S3_BUCKET: your-bucket
AWS_REGION: us-east-1
```
4. **Use AWS Load Balancer Controller** for ingress
```yaml
ingress:
className: aws-alb
annotations:
alb.ingress.kubernetes.io/scheme: internet-facing
```
5. **Use EBS for persistent storage**
```yaml
backend:
persistence:
storageClass: ebs-sc
```
### Troubleshooting
Check pod status:
```bash
kubectl get pods -n dating-app
kubectl logs -n dating-app <pod-name>
```
Check services:
```bash
kubectl get svc -n dating-app
```
Check ingress:
```bash
kubectl get ingress -n dating-app
```
Port forward for debugging:
```bash
kubectl port-forward -n dating-app svc/backend 8000:8000
kubectl port-forward -n dating-app svc/frontend 3000:80
```
### Database Initialization
The backend automatically initializes tables on startup. To verify:
```bash
kubectl exec -it -n dating-app <postgres-pod> -- psql -U dating_user -d dating_app -c "\dt"
```
### Notes
- This chart is designed to be portable between on-premises and cloud deployments
- Modify `values.yaml` for your specific infrastructure
- For production, use external secrets management (HashiCorp Vault, AWS Secrets Manager, etc.)
- Enable TLS/SSL with cert-manager for production ingress
- Configure proper backup strategies for PostgreSQL PVC

View File

@ -0,0 +1,80 @@
{{/*
Deployment for the dating-app backend.
An init container blocks pod start until PostgreSQL answers pg_isready;
DB credentials are injected from the release's db-credentials Secret and
all three probes hit the backend's /health endpoint.
*/}}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Release.Name }}-{{ .Values.backend.name }}
  namespace: {{ .Values.global.namespace }}
  labels:
    app: {{ .Release.Name }}-{{ .Values.backend.name }}
    component: backend
spec:
  replicas: {{ .Values.backend.replicaCount }}
  selector:
    matchLabels:
      app: {{ .Release.Name }}-{{ .Values.backend.name }}
  template:
    metadata:
      labels:
        app: {{ .Release.Name }}-{{ .Values.backend.name }}
        component: backend
    spec:
      initContainers:
        # Gate startup on the database being reachable.
        - name: wait-for-postgres
          image: postgres:16-alpine
          command:
            - /bin/sh
            - -c
            - |
              until pg_isready -h {{ .Release.Name }}-{{ .Values.postgres.name }}-headless -p {{ .Values.postgres.port }}; do
                echo "Waiting for PostgreSQL..."
                sleep 2
              done
              echo "PostgreSQL is ready!"
      containers:
        - name: {{ .Values.backend.name }}
          image: "{{ .Values.backend.image.repository }}:{{ .Values.backend.image.tag }}"
          imagePullPolicy: {{ .Values.backend.image.pullPolicy }}
          ports:
            - containerPort: {{ .Values.backend.service.targetPort }}
              name: http
              protocol: TCP
          env:
            {{- if .Values.backend.env }}
            {{- range $key, $value := .Values.backend.env }}
            - name: {{ $key }}
              value: {{ $value | quote }}
            {{- end }}
            {{- end }}
          envFrom:
            - secretRef:
                name: {{ .Release.Name }}-db-credentials
          startupProbe:
            httpGet:
              path: /health
              port: http
            initialDelaySeconds: 15
            periodSeconds: 5
            timeoutSeconds: 3
            failureThreshold: 30
          livenessProbe:
            httpGet:
              path: /health
              port: http
            initialDelaySeconds: 30
            periodSeconds: 10
            timeoutSeconds: 5
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /health
              port: http
            initialDelaySeconds: 10
            periodSeconds: 5
            timeoutSeconds: 3
            failureThreshold: 2
          resources:
            requests:
              cpu: {{ .Values.backend.resources.requests.cpu }}
              memory: {{ .Values.backend.resources.requests.memory }}
            limits:
              cpu: {{ .Values.backend.resources.limits.cpu }}
              memory: {{ .Values.backend.resources.limits.memory }}

View File

@ -0,0 +1,17 @@
{{/*
Service for the dating-app backend; routes the configured port to the
container's target port on pods matching the backend app label.
*/}}
apiVersion: v1
kind: Service
metadata:
  name: {{ .Release.Name }}-{{ .Values.backend.name }}
  namespace: {{ .Values.global.namespace }}
  labels:
    app: {{ .Release.Name }}-{{ .Values.backend.name }}
    component: backend
spec:
  type: {{ .Values.backend.service.type }}
  selector:
    app: {{ .Release.Name }}-{{ .Values.backend.name }}
  ports:
    - name: http
      port: {{ .Values.backend.service.port }}
      targetPort: {{ .Values.backend.service.targetPort }}
      protocol: TCP

View File

@ -0,0 +1,37 @@
{{/*
One-shot Job that applies additive schema migrations (idempotent
ADD COLUMN IF NOT EXISTS statements) against the release's PostgreSQL.
Fix: the original script ignored every psql exit status, so a failed
migration still left the Job "Completed". The script now runs under
`set -e` with psql -v ON_ERROR_STOP=1, so any SQL error fails the pod
and OnFailure restarts/retries it. The repeated psql invocation is
factored into a run_sql helper.
*/}}
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ .Release.Name }}-db-migration
  namespace: {{ .Values.global.namespace }}
spec:
  template:
    spec:
      restartPolicy: OnFailure
      containers:
        - name: migrate
          image: postgres:16-alpine
          command:
            - /bin/sh
            - -c
            - |
              set -e
              run_sql() {
                psql -v ON_ERROR_STOP=1 -h {{ .Release.Name }}-{{ .Values.postgres.name }}-headless -U {{ .Values.postgres.user }} -d {{ .Values.postgres.database }} -c "$1"
              }
              echo "Running DB migration: add display_name to profiles..."
              run_sql "ALTER TABLE profiles ADD COLUMN IF NOT EXISTS display_name TEXT;"
              echo "Running DB migration: add age to profiles..."
              run_sql "ALTER TABLE profiles ADD COLUMN IF NOT EXISTS age INTEGER;"
              echo "Running DB migration: add gender to profiles..."
              run_sql "ALTER TABLE profiles ADD COLUMN IF NOT EXISTS gender TEXT;"
              echo "Running DB migration: add location to profiles..."
              run_sql "ALTER TABLE profiles ADD COLUMN IF NOT EXISTS location TEXT;"
              echo "Running DB migration: add bio to profiles..."
              run_sql "ALTER TABLE profiles ADD COLUMN IF NOT EXISTS bio TEXT;"
              echo "Running DB migration: add interests to profiles..."
              run_sql "ALTER TABLE profiles ADD COLUMN IF NOT EXISTS interests TEXT;"
              echo "Running DB migration: add photos to profiles..."
              run_sql "ALTER TABLE profiles ADD COLUMN IF NOT EXISTS photos TEXT[];"
              echo "Running DB migration: add acknowledged_at to likes..."
              run_sql "ALTER TABLE likes ADD COLUMN IF NOT EXISTS acknowledged_at TIMESTAMP;"
              echo "Running DB migration: add read_at to messages..."
              run_sql "ALTER TABLE messages ADD COLUMN IF NOT EXISTS read_at TIMESTAMP;"
          env:
            - name: PGPASSWORD
              value: {{ .Values.postgres.password | quote }}

View File

@ -0,0 +1,56 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Release.Name }}-db-schema
  namespace: {{ .Values.global.namespace }}
data:
  # Mounted by the postgres StatefulSet at /docker-entrypoint-initdb.d, so this
  # schema runs only when the data directory is freshly initialized.
  schema.sql: |
    -- Create users table
    CREATE TABLE IF NOT EXISTS users (
      id SERIAL PRIMARY KEY,
      username TEXT UNIQUE NOT NULL,
      email TEXT UNIQUE NOT NULL,
      hashed_password TEXT NOT NULL,
      first_name TEXT,
      last_name TEXT,
      age INTEGER,
      gender TEXT,
      bio TEXT,
      profile_picture TEXT,
      location TEXT,
      interests TEXT[] DEFAULT '{}',
      is_active BOOLEAN DEFAULT TRUE,
      created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
      updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE INDEX IF NOT EXISTS idx_users_username ON users (username);
    CREATE INDEX IF NOT EXISTS idx_users_email ON users (email);
    CREATE INDEX IF NOT EXISTS idx_users_is_active ON users (is_active);
    -- Profiles table for additional user info
    -- NOTE(review): the migration job ALTERs profiles to add age/gender/
    -- location/bio/interests/photos, and also ALTERs "likes" and "messages"
    -- tables that this bootstrap schema never creates -- presumably the
    -- application creates them; verify a fresh install converges to the same
    -- schema as a migrated one.
    CREATE TABLE IF NOT EXISTS profiles (
      id SERIAL PRIMARY KEY,
      user_id INTEGER UNIQUE NOT NULL REFERENCES users(id) ON DELETE CASCADE,
      verified BOOLEAN DEFAULT FALSE,
      verification_token TEXT,
      created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
      updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE INDEX IF NOT EXISTS idx_profiles_user_id ON profiles (user_id);
    -- Matches/Likes table
    CREATE TABLE IF NOT EXISTS matches (
      id SERIAL PRIMARY KEY,
      user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
      matched_user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
      status TEXT DEFAULT 'pending',
      created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE INDEX IF NOT EXISTS idx_matches_user_id ON matches (user_id);
    CREATE INDEX IF NOT EXISTS idx_matches_matched_user_id ON matches (matched_user_id);
    -- Prevent duplicate matches in both directions
    CREATE UNIQUE INDEX IF NOT EXISTS idx_matches_unique ON matches
      (LEAST(user_id, matched_user_id), GREATEST(user_id, matched_user_id));

View File

@ -0,0 +1,13 @@
apiVersion: v1
kind: Secret
metadata:
  name: {{ .Release.Name }}-db-credentials
  namespace: {{ .Values.global.namespace }}
type: Opaque
# Connection details for the in-cluster postgres (headless service DNS name).
# All templated values are quoted so an empty or special-character expansion
# cannot change the YAML type or break parsing.
# NOTE(review): credentials originate from plain values.yaml entries; consider
# an existing-secret mechanism so they stay out of VCS.
stringData:
  # Full DSN used by the backend; assumes user/password contain no '"' -- the
  # per-field keys below are the safe alternative for exotic passwords.
  DATABASE_URL: "postgresql://{{ .Values.postgres.user }}:{{ .Values.postgres.password }}@{{ .Release.Name }}-{{ .Values.postgres.name }}-headless.{{ .Values.global.namespace }}.svc.cluster.local:{{ .Values.postgres.port }}/{{ .Values.postgres.database }}"
  DB_HOST: {{ printf "%s-%s-headless.%s.svc.cluster.local" .Release.Name .Values.postgres.name .Values.global.namespace | quote }}
  DB_PORT: {{ .Values.postgres.port | quote }}
  DB_NAME: {{ .Values.postgres.database | quote }}
  DB_USER: {{ .Values.postgres.user | quote }}
  DB_PASSWORD: {{ .Values.postgres.password | quote }}

View File

@ -0,0 +1,35 @@
apiVersion: v1
kind: Service
metadata:
  # Headless service (clusterIP: None): gives the StatefulSet pod a stable DNS
  # name; the db-credentials Secret's DB_HOST/DATABASE_URL point at this name.
  name: {{ .Release.Name }}-{{ .Values.postgres.name }}-headless
  namespace: {{ .Values.global.namespace }}
  labels:
    app: {{ .Release.Name }}-{{ .Values.postgres.name }}
    component: database
spec:
  clusterIP: None
  selector:
    app: {{ .Release.Name }}-{{ .Values.postgres.name }}
  ports:
    - name: postgres
      port: {{ .Values.postgres.port }}
      targetPort: {{ .Values.postgres.port }}
      protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
  # Regular (ClusterIP by default) service for ad-hoc access to postgres.
  name: {{ .Release.Name }}-{{ .Values.postgres.name }}
  namespace: {{ .Values.global.namespace }}
  labels:
    app: {{ .Release.Name }}-{{ .Values.postgres.name }}
    component: database
spec:
  type: {{ .Values.postgres.service.type }}
  selector:
    app: {{ .Release.Name }}-{{ .Values.postgres.name }}
  ports:
    - name: postgres
      port: {{ .Values.postgres.service.port }}
      targetPort: {{ .Values.postgres.port }}
      protocol: TCP

View File

@ -0,0 +1,84 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ .Release.Name }}-{{ .Values.postgres.name }}
  namespace: {{ .Values.global.namespace }}
  labels:
    app: {{ .Release.Name }}-{{ .Values.postgres.name }}
    component: database
spec:
  # Must match the headless Service name defined alongside this StatefulSet.
  serviceName: {{ .Release.Name }}-{{ .Values.postgres.name }}-headless
  replicas: 1
  selector:
    matchLabels:
      app: {{ .Release.Name }}-{{ .Values.postgres.name }}
  template:
    metadata:
      labels:
        app: {{ .Release.Name }}-{{ .Values.postgres.name }}
        component: database
    spec:
      containers:
        - name: postgres
          image: "{{ .Values.postgres.image.repository }}:{{ .Values.postgres.image.tag }}"
          imagePullPolicy: {{ .Values.postgres.image.pullPolicy }}
          ports:
            - containerPort: {{ .Values.postgres.port }}
              name: postgres
              protocol: TCP
          env:
            # NOTE(review): credentials are injected as plain env values taken
            # from values.yaml; consider secretKeyRef against the
            # db-credentials Secret instead.
            - name: POSTGRES_USER
              value: {{ .Values.postgres.user | quote }}
            - name: POSTGRES_PASSWORD
              value: {{ .Values.postgres.password | quote }}
            - name: POSTGRES_DB
              value: {{ .Values.postgres.database | quote }}
            # Keep the cluster in a subdirectory of the mounted volume.
            - name: PGDATA
              value: /var/lib/postgresql/data/pgdata
          volumeMounts:
            - name: data
              mountPath: /var/lib/postgresql/data
            # schema.sql from the db-schema ConfigMap; the image's entrypoint
            # runs it only on a fresh data directory.
            - name: init-sql
              mountPath: /docker-entrypoint-initdb.d
          livenessProbe:
            exec:
              command:
                - /bin/sh
                - -c
                - pg_isready -U {{ .Values.postgres.user }}
            initialDelaySeconds: 30
            periodSeconds: 10
            timeoutSeconds: 5
            failureThreshold: 3
          readinessProbe:
            exec:
              command:
                - /bin/sh
                - -c
                - pg_isready -U {{ .Values.postgres.user }}
            initialDelaySeconds: 10
            periodSeconds: 5
            timeoutSeconds: 2
            failureThreshold: 3
          resources:
            requests:
              cpu: {{ .Values.postgres.resources.requests.cpu }}
              memory: {{ .Values.postgres.resources.requests.memory }}
            limits:
              cpu: {{ .Values.postgres.resources.limits.cpu }}
              memory: {{ .Values.postgres.resources.limits.memory }}
      volumes:
        - name: init-sql
          configMap:
            name: {{ .Release.Name }}-db-schema
            # NOTE(review): 0755 marks schema.sql executable; .sql init files
            # are only read by the entrypoint, so a read-only mode would
            # suffice -- confirm before tightening.
            defaultMode: 0755
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes:
          - {{ .Values.postgres.persistence.accessMode }}
        storageClassName: {{ .Values.postgres.persistence.storageClass }}
        resources:
          requests:
            storage: {{ .Values.postgres.persistence.size }}

View File

@ -0,0 +1,71 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Release.Name }}-{{ .Values.frontend.name }}
  namespace: {{ .Values.global.namespace }}
  labels:
    app: {{ .Release.Name }}-{{ .Values.frontend.name }}
    component: frontend
spec:
  replicas: {{ .Values.frontend.replicaCount }}
  selector:
    matchLabels:
      app: {{ .Release.Name }}-{{ .Values.frontend.name }}
  template:
    metadata:
      labels:
        app: {{ .Release.Name }}-{{ .Values.frontend.name }}
        component: frontend
    spec:
      # Honor global.imagePullSecrets (defaults to [] in values.yaml) so the
      # private-registry frontend image can be pulled; renders nothing when
      # the list is empty, so default behavior is unchanged.
      {{- with .Values.global.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      initContainers:
        # Block pod startup until the backend /health endpoint answers.
        # NOTE(review): no timeout -- if the backend never becomes healthy
        # this init container loops forever; confirm that is intended.
        - name: wait-for-backend
          image: busybox:1.35
          command:
            - /bin/sh
            - -c
            - |
              echo "Waiting for backend to be ready..."
              until wget -q -O- http://{{ .Release.Name }}-{{ .Values.backend.name }}:{{ .Values.backend.service.port }}/health > /dev/null 2>&1; do
                echo "Backend not ready, waiting..."
                sleep 2
              done
              echo "Backend is ready!"
      containers:
        - name: {{ .Values.frontend.name }}
          image: "{{ .Values.frontend.image.repository }}:{{ .Values.frontend.image.tag }}"
          imagePullPolicy: {{ .Values.frontend.image.pullPolicy }}
          ports:
            - containerPort: {{ .Values.frontend.service.targetPort }}
              name: http
              protocol: TCP
          volumeMounts:
            # Runtime env.js overlaid onto the built site via subPath so
            # API_BASE can change per deploy without rebuilding the image.
            - name: env-config
              mountPath: /usr/share/nginx/html/env.js
              subPath: env.js
          livenessProbe:
            httpGet:
              path: /
              port: http
            initialDelaySeconds: 10
            periodSeconds: 10
            timeoutSeconds: 5
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /
              port: http
            initialDelaySeconds: 5
            periodSeconds: 5
            timeoutSeconds: 3
            failureThreshold: 2
          resources:
            requests:
              cpu: {{ .Values.frontend.resources.requests.cpu }}
              memory: {{ .Values.frontend.resources.requests.memory }}
            limits:
              cpu: {{ .Values.frontend.resources.limits.cpu }}
              memory: {{ .Values.frontend.resources.limits.memory }}
      volumes:
        - name: env-config
          configMap:
            name: {{ .Release.Name }}-frontend-env-config

View File

@ -0,0 +1,13 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Release.Name }}-frontend-env-config
  namespace: {{ .Values.global.namespace }}
  labels:
    app: {{ .Release.Name }}-{{ .Values.frontend.name }}
    component: frontend
data:
  # Mounted over /usr/share/nginx/html/env.js by the frontend Deployment so
  # the browser picks up API_BASE at runtime.
  env.js: |
    window.__ENV__ = {
      API_BASE: "{{ .Values.frontend.env.API_BASE }}"
    };

View File

@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ .Release.Name }}-{{ .Values.frontend.name }}
  namespace: {{ .Values.global.namespace }}
  labels:
    app: {{ .Release.Name }}-{{ .Values.frontend.name }}
    component: frontend
spec:
  # Fronts the frontend Deployment pods; the frontend Ingress targets this
  # service's port.
  type: {{ .Values.frontend.service.type }}
  selector:
    app: {{ .Release.Name }}-{{ .Values.frontend.name }}
  ports:
    - name: http
      port: {{ .Values.frontend.service.port }}
      targetPort: {{ .Values.frontend.service.targetPort }}
      protocol: TCP

View File

@ -0,0 +1,89 @@
{{- if .Values.frontend.ingress.enabled }}
# Frontend ingress -- host/path/tls all driven by frontend.ingress.* values.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ .Release.Name }}-frontend
  namespace: {{ .Values.global.namespace }}
  labels:
    app: {{ .Release.Name }}-frontend
    component: frontend
  {{- with .Values.frontend.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.frontend.ingress.className }}
  ingressClassName: {{ .Values.frontend.ingress.className }}
  {{- end }}
  rules:
    {{- range .Values.frontend.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            pathType: {{ .pathType }}
            backend:
              service:
                # $ root context: we are inside a range scope here.
                name: {{ $.Release.Name }}-{{ $.Values.frontend.name }}
                port:
                  number: {{ $.Values.frontend.service.port }}
          {{- end }}
    {{- end }}
  {{- if .Values.frontend.ingress.tls }}
  tls:
    {{- range .Values.frontend.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
{{- end }}
---
{{- if .Values.backend.ingress.enabled }}
# Backend (API) ingress -- mirrors the frontend block above but reads
# backend.ingress.* values.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ .Release.Name }}-backend
  namespace: {{ .Values.global.namespace }}
  labels:
    app: {{ .Release.Name }}-backend
    component: backend
  {{- with .Values.backend.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.backend.ingress.className }}
  ingressClassName: {{ .Values.backend.ingress.className }}
  {{- end }}
  rules:
    {{- range .Values.backend.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            pathType: {{ .pathType }}
            backend:
              service:
                name: {{ $.Release.Name }}-{{ $.Values.backend.name }}
                port:
                  number: {{ $.Values.backend.service.port }}
          {{- end }}
    {{- end }}
  {{- if .Values.backend.ingress.tls }}
  tls:
    {{- range .Values.backend.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
{{- end }}

View File

@ -0,0 +1,79 @@
---
# Example values for AWS deployment
# Copy to values-aws.yaml and customize with your AWS details
# NOTE(review): several keys here do not match the chart templates visible in
# this repo (templates read replicaCount and ingress.hosts[].paths[]; this file
# sets replicas and flat ingress.host/path/pathType). Verify against the
# chart's actual values schema before using this example.
global:
  domain: yourdomain.com
# Disable built-in PostgreSQL and use RDS instead
postgres:
  enabled: false
backend:
  image:
    repository: 123456789.dkr.ecr.us-east-1.amazonaws.com/dating-app-backend
    tag: latest
    pullPolicy: IfNotPresent
  replicas: 3
  resources:
    requests:
      memory: "512Mi"
      cpu: "200m"
    limits:
      memory: "1Gi"
      cpu: "500m"
  service:
    port: 8000
    type: ClusterIP
  ingress:
    enabled: true
    className: aws-alb
    host: api.yourdomain.com
    path: /
    pathType: Prefix
  environment:
    # Use RDS endpoint here with updated credentials
    # NOTE(review): example embeds a real-looking password ("Aa123456");
    # replace it and keep real connection strings out of version control.
    DATABASE_URL: "postgresql://dating_app_user:Aa123456@your-rds-endpoint.us-east-1.rds.amazonaws.com:5432/dating_app"
    JWT_SECRET: "your-secure-secret-key"
    JWT_EXPIRES_MINUTES: "1440"
    MEDIA_DIR: /app/media
    CORS_ORIGINS: "https://yourdomain.com,https://api.yourdomain.com"
  persistence:
    enabled: true
    size: 20Gi
    storageClass: ebs-sc # AWS EBS storage class
    mountPath: /app/media
frontend:
  image:
    repository: 123456789.dkr.ecr.us-east-1.amazonaws.com/dating-app-frontend
    tag: latest
    pullPolicy: IfNotPresent
  replicas: 3
  resources:
    requests:
      memory: "256Mi"
      cpu: "100m"
    limits:
      memory: "512Mi"
      cpu: "200m"
  service:
    port: 80
    type: ClusterIP
  ingress:
    enabled: true
    className: aws-alb
    host: yourdomain.com
    path: /
    pathType: Prefix
  env:
    API_BASE: "https://api.yourdomain.com"
ingress:
  enabled: true
  className: aws-alb
  annotations:
    alb.ingress.kubernetes.io/scheme: internet-facing
    alb.ingress.kubernetes.io/target-type: ip
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
    alb.ingress.kubernetes.io/certificate-arn: "arn:aws:acm:us-east-1:123456789:certificate/xxxx"

View File

@ -0,0 +1,80 @@
---
# Example values for development/lab deployment
# Copy to values-dev.yaml and customize
# NOTE(review): key names here differ from the chart templates visible in this
# repo (templates read postgres.user/password/database and replicaCount; this
# file uses postgres.credentials.* and replicas). Verify against the chart's
# actual values schema before use.
global:
  domain: lab.local
postgres:
  enabled: true
  replicas: 1
  persistence:
    enabled: true
    size: 5Gi
    storageClass: "" # Use default storage class
  credentials:
    username: dating_app_user
    # NOTE(review): dev-only password committed to VCS -- fine for a lab,
    # never reuse in production.
    password: Aa123456
    database: dating_app
backend:
  image:
    repository: dating-app-backend
    tag: latest
    pullPolicy: IfNotPresent
  replicas: 1
  resources:
    requests:
      memory: "256Mi"
      cpu: "100m"
    limits:
      memory: "512Mi"
      cpu: "500m"
  service:
    port: 8000
    type: ClusterIP
  ingress:
    enabled: true
    className: nginx
    host: api.lab.local
    path: /
    pathType: Prefix
  environment:
    JWT_SECRET: dev-secret-key-change-in-production
    JWT_EXPIRES_MINUTES: "1440"
    MEDIA_DIR: /app/media
    CORS_ORIGINS: "http://localhost:5173,http://localhost:3000,http://api.lab.local,http://app.lab.local"
  persistence:
    enabled: true
    size: 5Gi
    storageClass: ""
frontend:
  image:
    repository: dating-app-frontend
    tag: latest
    pullPolicy: IfNotPresent
  replicas: 1
  resources:
    requests:
      memory: "128Mi"
      cpu: "50m"
    limits:
      memory: "256Mi"
      cpu: "200m"
  service:
    port: 80
    type: ClusterIP
  ingress:
    enabled: true
    className: nginx
    host: app.lab.local
    path: /
    pathType: Prefix
  env:
    API_BASE: "http://api.lab.local"
ingress:
  enabled: true
  className: nginx
  annotations: {}

View File

@ -0,0 +1,125 @@
# Default values for dateme-chart
global:
  namespace: my-apps
  # Names of pull secrets for the private harbor registry; empty by default.
  imagePullSecrets: []
# Backend configuration
backend:
  name: backend
  replicaCount: 1
  image:
    repository: harbor.dvirlabs.com/my-apps/dateme-backend
    # Always re-pull: the tag below is a mutable "latest"-style tag that CI
    # rewrites (see woodpecker-bot commits), so caching would pin stale images.
    pullPolicy: Always
    tag: develop-latest
  service:
    type: ClusterIP
    port: 8000
    targetPort: 8000
  resources:
    requests:
      cpu: 100m
      memory: 128Mi
    limits:
      cpu: 500m
      memory: 512Mi
  env:
    PYTHONUNBUFFERED: "1"
  ingress:
    enabled: true
    className: "traefik"
    annotations:
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
      traefik.ingress.kubernetes.io/router.tls: "true"
      cert-manager.io/cluster-issuer: "letsencrypt-prod"
    hosts:
      - host: api-dateme.dvirlabs.com
        paths:
          - path: /
            pathType: Prefix
    tls:
      - secretName: api-dateme-tls
        hosts:
          - api-dateme.dvirlabs.com
# Frontend configuration
frontend:
  name: frontend
  replicaCount: 1
  image:
    repository: harbor.dvirlabs.com/my-apps/dateme-frontend
    pullPolicy: Always
    tag: develop-latest
  service:
    type: ClusterIP
    port: 80
    targetPort: 80
  env:
    # Injected into env.js at deploy time (see frontend-env ConfigMap).
    API_BASE: "https://api-dateme.dvirlabs.com"
  resources:
    requests:
      cpu: 50m
      memory: 64Mi
    limits:
      cpu: 200m
      memory: 256Mi
  ingress:
    enabled: true
    className: "traefik"
    annotations:
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
      traefik.ingress.kubernetes.io/router.tls: "true"
      cert-manager.io/cluster-issuer: "letsencrypt-prod"
    hosts:
      - host: dateme.dvirlabs.com
        paths:
          - path: /
            pathType: Prefix
    tls:
      - secretName: dateme-tls
        hosts:
          - dateme.dvirlabs.com
  externalUrl: "https://dateme.dvirlabs.com"
# PostgreSQL configuration
postgres:
  name: db
  image:
    repository: postgres
    # Quoted: a bare 16 would parse as an integer.
    tag: "16"
    pullPolicy: IfNotPresent
  # NOTE(review): plaintext credentials in values.yaml end up in VCS and in
  # `helm get values` output; consider an existing-Secret reference.
  user: dateme_user
  password: dateme_password
  database: dateme_db
  port: 5432
  service:
    type: ClusterIP
    port: 5432
    targetPort: 5432
  persistence:
    enabled: true
    accessMode: ReadWriteOnce
    storageClass: "nfs-client"
    size: 10Gi
  resources:
    requests:
      cpu: 100m
      memory: 256Mi
    limits:
      cpu: 500m
      memory: 512Mi
# Ingress (top-level, disabled - use component-specific ingress instead)
ingress:
  enabled: false
  className: "traefik"
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
  hosts:
    - host: dateme.dvirlabs.com
      paths:
        - path: /
          pathType: Prefix
  tls:
    - secretName: dateme-tls
      hosts:
        - dateme.dvirlabs.com

View File

@ -0,0 +1,12 @@
# Helm 3 chart manifest for the Invy application.
apiVersion: v2
name: invy
description: A Helm chart for Invy - Wedding Guest List Management Application
type: application
# Chart packaging version (SemVer) -- bump on any chart change.
version: 1.0.0
# Version of the Invy application this chart deploys.
appVersion: "1.0.0"
keywords:
  - invy
  - wedding
  - guest-management
maintainers:
  - name: dvir

183
charts/invy-chart/README.md Normal file
View File

@ -0,0 +1,183 @@
# Invy Helm Chart
This Helm chart deploys the Invy wedding guest list management application on Kubernetes.
## Prerequisites
- Kubernetes 1.19+
- Helm 3.0+
- Persistent Volume provisioner support in the underlying infrastructure (for PostgreSQL)
- Ingress controller (Traefik recommended)
- Cert-manager (for TLS certificates)
## Components
This chart deploys the following components:
- **Frontend**: React + Vite application (Nginx)
- **Backend**: FastAPI application
- **Database**: PostgreSQL 16
## Installation
### Add the chart repository (if applicable)
```bash
helm repo add invy https://your-helm-repo.com
helm repo update
```
### Install the chart
```bash
# Install with default values
helm install invy ./invy-chart -n my-apps --create-namespace
# Install with custom values
helm install invy ./invy-chart -n my-apps --create-namespace -f custom-values.yaml
```
## Configuration
The following table lists the configurable parameters and their default values.
### Global Parameters
| Parameter | Description | Default |
|-----------|-------------|---------|
| `global.namespace` | Namespace for all resources | `my-apps` |
| `global.imagePullSecrets` | Image pull secrets | `[]` |
### Backend Parameters
| Parameter | Description | Default |
|-----------|-------------|---------|
| `backend.image.repository` | Backend image repository | `harbor.dvirlabs.com/my-apps/invy-backend` |
| `backend.image.tag` | Backend image tag | `latest` |
| `backend.replicaCount` | Number of backend replicas | `1` |
| `backend.service.port` | Backend service port | `8000` |
| `backend.ingress.enabled` | Enable backend ingress | `true` |
| `backend.ingress.hosts[0].host` | Backend ingress hostname | `api-invy.dvirlabs.com` |
### Frontend Parameters
| Parameter | Description | Default |
|-----------|-------------|---------|
| `frontend.image.repository` | Frontend image repository | `harbor.dvirlabs.com/my-apps/invy-frontend` |
| `frontend.image.tag` | Frontend image tag | `latest` |
| `frontend.replicaCount` | Number of frontend replicas | `1` |
| `frontend.service.port` | Frontend service port | `80` |
| `frontend.env.VITE_API_URL` | Backend API URL | `https://api-invy.dvirlabs.com` |
| `frontend.ingress.enabled` | Enable frontend ingress | `true` |
| `frontend.ingress.hosts[0].host` | Frontend ingress hostname | `invy.dvirlabs.com` |
### PostgreSQL Parameters
| Parameter | Description | Default |
|-----------|-------------|---------|
| `postgres.user` | PostgreSQL user | `invy_user` |
| `postgres.password` | PostgreSQL password | `invy_password` |
| `postgres.database` | PostgreSQL database name | `invy_db` |
| `postgres.persistence.enabled` | Enable persistence | `true` |
| `postgres.persistence.size` | Persistent volume size | `10Gi` |
| `postgres.persistence.storageClass` | Storage class | `nfs-client` |
## Building and Pushing Images
### Backend Image
```bash
cd backend
docker build -t harbor.dvirlabs.com/my-apps/invy-backend:latest .
docker push harbor.dvirlabs.com/my-apps/invy-backend:latest
```
### Frontend Image
```bash
cd frontend
docker build --build-arg VITE_API_URL=https://api-invy.dvirlabs.com -t harbor.dvirlabs.com/my-apps/invy-frontend:latest .
docker push harbor.dvirlabs.com/my-apps/invy-frontend:latest
```
## Upgrading
```bash
helm upgrade invy ./invy-chart -n my-apps
```
## Uninstalling
```bash
helm uninstall invy -n my-apps
```
## Customization
Create a `custom-values.yaml` file to override default values:
```yaml
backend:
image:
tag: "v1.0.0"
ingress:
hosts:
- host: api.mycompany.com
paths:
- path: /
pathType: Prefix
frontend:
image:
tag: "v1.0.0"
env:
VITE_API_URL: "https://api.mycompany.com"
ingress:
hosts:
- host: invy.mycompany.com
paths:
- path: /
pathType: Prefix
postgres:
password: "your-secure-password"
persistence:
storageClass: "your-storage-class"
```
Then install with:
```bash
helm install invy ./invy-chart -n my-apps -f custom-values.yaml
```
## Troubleshooting
### Check pod status
```bash
kubectl get pods -n my-apps
```
### View pod logs
```bash
# Backend logs
kubectl logs -n my-apps -l app.kubernetes.io/component=backend
# Frontend logs
kubectl logs -n my-apps -l app.kubernetes.io/component=frontend
# Database logs
kubectl logs -n my-apps -l app.kubernetes.io/component=database
```
### Access the database
```bash
kubectl exec -it -n my-apps invy-db-0 -- psql -U invy_user -d invy_db
```
## Support
For issues and feature requests, please open an issue in the repository.

View File

@ -0,0 +1,38 @@
Thank you for installing {{ .Chart.Name }}!
Your release is named {{ .Release.Name }}.
To learn more about the release, try:
$ helm status {{ .Release.Name }}
$ helm get all {{ .Release.Name }}
{{- if .Values.frontend.ingress.enabled }}
Application URLs:
{{- range .Values.frontend.ingress.hosts }}
Frontend: https://{{ .host }}
{{- end }}
{{- range .Values.backend.ingress.hosts }}
Backend API: https://{{ .host }}
{{- end }}
{{- else }}
To access your application:
1. Get the frontend URL:
export POD_NAME=$(kubectl get pods --namespace {{ .Values.global.namespace }} -l "app.kubernetes.io/name={{ include "invy.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=frontend" -o jsonpath="{.items[0].metadata.name}")
kubectl --namespace {{ .Values.global.namespace }} port-forward $POD_NAME 8080:80
echo "Visit http://127.0.0.1:8080 to use your application"
2. Get the backend URL:
export POD_NAME=$(kubectl get pods --namespace {{ .Values.global.namespace }} -l "app.kubernetes.io/name={{ include "invy.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=backend" -o jsonpath="{.items[0].metadata.name}")
kubectl --namespace {{ .Values.global.namespace }} port-forward $POD_NAME 8000:8000
echo "Visit http://127.0.0.1:8000 to access the API"
{{- end }}
Database connection:
Host: {{ include "invy.fullname" . }}-db
Port: {{ .Values.postgres.port }}
Database: {{ .Values.postgres.database }}
User: {{ .Values.postgres.user }}

View File

@ -0,0 +1,60 @@
{{/*
Expand the name of the chart.
Overridable via .Values.nameOverride; truncated to 63 chars (DNS label limit).
*/}}
{{- define "invy.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
Uses .Values.fullnameOverride verbatim when set; otherwise the release name
(if it already contains the chart name) or "<release>-<name>", truncated to
63 chars.
*/}}
{{- define "invy.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label
(helm.sh/chart); "+" is illegal in label values, so it becomes "_".
*/}}
{{- define "invy.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels applied to every resource (includes the selector labels).
*/}}
{{- define "invy.labels" -}}
helm.sh/chart: {{ include "invy.chart" . }}
{{ include "invy.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels -- the immutable subset used in spec.selector; keep stable
across upgrades.
*/}}
{{- define "invy.selectorLabels" -}}
app.kubernetes.io/name: {{ include "invy.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use: the fullname when the chart
creates one, otherwise "default" (or an explicit .Values.serviceAccount.name).
*/}}
{{- define "invy.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "invy.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,100 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "invy.fullname" . }}-backend
  labels:
    {{- include "invy.labels" . | nindent 4 }}
    app.kubernetes.io/component: backend
spec:
  replicas: {{ .Values.backend.replicaCount }}
  selector:
    matchLabels:
      {{- include "invy.selectorLabels" . | nindent 6 }}
      app.kubernetes.io/component: backend
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "invy.selectorLabels" . | nindent 8 }}
        app.kubernetes.io/component: backend
    spec:
      {{- with .Values.global.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "invy.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      initContainers:
        # Block startup until postgres accepts TCP connections.
        # NOTE(review): no timeout -- waits forever if the DB never comes up.
        - name: wait-for-postgres
          image: busybox:1.35
          command: ['sh', '-c', 'until nc -z {{ include "invy.fullname" . }}-db-headless {{ .Values.postgres.port | default 5432 }}; do echo waiting for postgres; sleep 2; done;']
      containers:
        - name: backend
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.backend.image.repository }}:{{ .Values.backend.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.backend.image.pullPolicy }}
          ports:
            - name: http
              containerPort: {{ .Values.backend.service.targetPort }}
              protocol: TCP
          env:
            # Sensitive config comes from the chart's "-secrets" Secret;
            # non-sensitive settings from backend.env below.
            - name: DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: {{ include "invy.fullname" . }}-secrets
                  key: database-url
            - name: GOOGLE_CLIENT_ID
              valueFrom:
                secretKeyRef:
                  name: {{ include "invy.fullname" . }}-secrets
                  key: google-client-id
            - name: GOOGLE_CLIENT_SECRET
              valueFrom:
                secretKeyRef:
                  name: {{ include "invy.fullname" . }}-secrets
                  key: google-client-secret
            - name: WHATSAPP_ACCESS_TOKEN
              valueFrom:
                secretKeyRef:
                  name: {{ include "invy.fullname" . }}-secrets
                  key: whatsapp-access-token
            - name: WHATSAPP_PHONE_NUMBER_ID
              valueFrom:
                secretKeyRef:
                  name: {{ include "invy.fullname" . }}-secrets
                  key: whatsapp-phone-number-id
            {{- range $key, $value := .Values.backend.env }}
            - name: {{ $key }}
              value: {{ $value | quote }}
            {{- end }}
          livenessProbe:
            httpGet:
              path: /
              port: http
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /
              port: http
            initialDelaySeconds: 10
            periodSeconds: 5
          resources:
            {{- toYaml .Values.backend.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}

View File

@ -0,0 +1,42 @@
{{- if .Values.backend.ingress.enabled -}}
# API ingress for the Invy backend; driven by backend.ingress.* values.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ include "invy.fullname" . }}-backend
  labels:
    {{- include "invy.labels" . | nindent 4 }}
    app.kubernetes.io/component: backend
  {{- with .Values.backend.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.backend.ingress.className }}
  ingressClassName: {{ .Values.backend.ingress.className }}
  {{- end }}
  {{- if .Values.backend.ingress.tls }}
  tls:
    {{- range .Values.backend.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.backend.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            pathType: {{ .pathType }}
            backend:
              service:
                # $ escapes the range scope back to the root context.
                name: {{ include "invy.fullname" $ }}-backend
                port:
                  number: {{ $.Values.backend.service.port }}
          {{- end }}
    {{- end }}
{{- end }}

View File

@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "invy.fullname" . }}-backend
  labels:
    {{- include "invy.labels" . | nindent 4 }}
    app.kubernetes.io/component: backend
spec:
  # Fronts the backend Deployment pods; targetPort "http" resolves to the
  # container port named in the Deployment.
  type: {{ .Values.backend.service.type }}
  ports:
    - port: {{ .Values.backend.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "invy.selectorLabels" . | nindent 4 }}
    app.kubernetes.io/component: backend

View File

@ -0,0 +1,126 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "invy.fullname" . }}-db-schema
  labels:
    {{- include "invy.labels" . | nindent 4 }}
    app.kubernetes.io/component: database
data:
  init.sql: |
    -- Invy — Full Database Init Schema
    -- Runs only on a FRESH (empty) data directory.
    -- For existing production DBs run migrate_production.sql manually.
    CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
    -- ── Users ──────────────────────────────────────────────────────────────
    CREATE TABLE IF NOT EXISTS users (
      id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
      email TEXT NOT NULL UNIQUE,
      created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
    );
    CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);
    -- ── Events ─────────────────────────────────────────────────────────────
    CREATE TABLE IF NOT EXISTS events (
      id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
      name TEXT NOT NULL,
      date TIMESTAMP WITH TIME ZONE,
      location TEXT,
      partner1_name TEXT,
      partner2_name TEXT,
      venue TEXT,
      event_time TEXT,
      guest_link TEXT,
      created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
      updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
    );
    CREATE INDEX IF NOT EXISTS idx_events_created_at ON events(created_at);
    CREATE INDEX IF NOT EXISTS idx_events_guest_link ON events(guest_link);
    -- ── Event members (authorization) ──────────────────────────────────────
    CREATE TABLE IF NOT EXISTS event_members (
      id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
      event_id UUID NOT NULL REFERENCES events(id) ON DELETE CASCADE,
      user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
      role TEXT NOT NULL DEFAULT 'admin'
        CHECK (role IN ('admin', 'editor', 'viewer')),
      display_name TEXT,
      created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
      UNIQUE(event_id, user_id)
    );
    CREATE INDEX IF NOT EXISTS idx_event_members_event_id ON event_members(event_id);
    CREATE INDEX IF NOT EXISTS idx_event_members_user_id ON event_members(user_id);
    CREATE INDEX IF NOT EXISTS idx_event_members_event_user ON event_members(event_id, user_id);
    -- ── Guests v2 ──────────────────────────────────────────────────────────
    -- NOTE(review): both "phone" and "phone_number" columns exist and only
    -- phone_number is indexed -- confirm which one is canonical before any
    -- cleanup.
    CREATE TABLE IF NOT EXISTS guests_v2 (
      id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
      event_id UUID NOT NULL REFERENCES events(id) ON DELETE CASCADE,
      added_by_user_id UUID NOT NULL REFERENCES users(id),
      first_name TEXT NOT NULL,
      last_name TEXT NOT NULL DEFAULT '',
      email TEXT,
      phone TEXT,
      phone_number TEXT,
      rsvp_status TEXT NOT NULL DEFAULT 'invited'
        CHECK (rsvp_status IN ('invited', 'confirmed', 'declined')),
      meal_preference TEXT,
      has_plus_one BOOLEAN DEFAULT FALSE,
      plus_one_name TEXT,
      table_number TEXT,
      side TEXT,
      owner_email TEXT,
      source TEXT NOT NULL DEFAULT 'manual'
        CHECK (source IN ('google', 'manual', 'self-service')),
      notes TEXT,
      created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
      updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
    );
    CREATE INDEX IF NOT EXISTS idx_guests_v2_event_id ON guests_v2(event_id);
    CREATE INDEX IF NOT EXISTS idx_guests_v2_added_by ON guests_v2(added_by_user_id);
    CREATE INDEX IF NOT EXISTS idx_guests_v2_phone_number ON guests_v2(phone_number);
    CREATE INDEX IF NOT EXISTS idx_guests_v2_event_phone ON guests_v2(event_id, phone_number);
    CREATE INDEX IF NOT EXISTS idx_guests_v2_event_status ON guests_v2(event_id, rsvp_status);
    CREATE INDEX IF NOT EXISTS idx_guests_v2_owner_email ON guests_v2(event_id, owner_email);
    CREATE INDEX IF NOT EXISTS idx_guests_v2_source ON guests_v2(event_id, source);
    -- ── RSVP tokens ────────────────────────────────────────────────────────
    CREATE TABLE IF NOT EXISTS rsvp_tokens (
      token TEXT PRIMARY KEY,
      event_id UUID NOT NULL REFERENCES events(id) ON DELETE CASCADE,
      guest_id UUID REFERENCES guests_v2(id) ON DELETE SET NULL,
      phone TEXT,
      created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
      expires_at TIMESTAMP WITH TIME ZONE,
      used_at TIMESTAMP WITH TIME ZONE
    );
    CREATE INDEX IF NOT EXISTS idx_rsvp_tokens_event_id ON rsvp_tokens(event_id);
    CREATE INDEX IF NOT EXISTS idx_rsvp_tokens_guest_id ON rsvp_tokens(guest_id);
    -- ── updated_at trigger ─────────────────────────────────────────────────
    CREATE OR REPLACE FUNCTION _update_updated_at()
    RETURNS TRIGGER LANGUAGE plpgsql AS $$
    BEGIN
      NEW.updated_at = CURRENT_TIMESTAMP;
      RETURN NEW;
    END;
    $$;
    -- DO blocks swallow duplicate_object so re-running on an existing DB is a
    -- no-op (CREATE TRIGGER has no IF NOT EXISTS).
    DO $$ BEGIN
      CREATE TRIGGER trg_guests_v2_updated_at
        BEFORE UPDATE ON guests_v2
        FOR EACH ROW EXECUTE FUNCTION _update_updated_at();
    EXCEPTION WHEN duplicate_object THEN NULL; END $$;
    DO $$ BEGIN
      CREATE TRIGGER trg_events_updated_at
        BEFORE UPDATE ON events
        FOR EACH ROW EXECUTE FUNCTION _update_updated_at();
    EXCEPTION WHEN duplicate_object THEN NULL; END $$;

View File

@ -0,0 +1,36 @@
# Two Services front the Postgres StatefulSet: a regular ClusterIP Service for
# application traffic and a headless Service used as the StatefulSet's
# serviceName (gives each pod a stable DNS identity).
apiVersion: v1
kind: Service
metadata:
  name: {{ include "invy.fullname" . }}-db
  labels:
    {{- include "invy.labels" . | nindent 4 }}
    app.kubernetes.io/component: database
spec:
  type: {{ .Values.postgres.service.type }}
  ports:
    - port: {{ .Values.postgres.service.port }}
      targetPort: postgres
      protocol: TCP
      name: postgres
  selector:
    {{- include "invy.selectorLabels" . | nindent 4 }}
    app.kubernetes.io/component: database
---
# Headless companion Service (clusterIP: None) for the StatefulSet.
apiVersion: v1
kind: Service
metadata:
  name: {{ include "invy.fullname" . }}-db-headless
  labels:
    {{- include "invy.labels" . | nindent 4 }}
    app.kubernetes.io/component: database
spec:
  type: ClusterIP
  clusterIP: None
  ports:
    - port: {{ .Values.postgres.service.port }}
      targetPort: postgres
      protocol: TCP
      name: postgres
  selector:
    {{- include "invy.selectorLabels" . | nindent 4 }}
    app.kubernetes.io/component: database

View File

@ -0,0 +1,131 @@
# Single-replica PostgreSQL StatefulSet. Schema init scripts are mounted from
# the -db-schema ConfigMap; credentials come from the chart's Secret.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ include "invy.fullname" . }}-db
  labels:
    {{- include "invy.labels" . | nindent 4 }}
    app.kubernetes.io/component: database
spec:
  serviceName: {{ include "invy.fullname" . }}-db-headless
  replicas: 1
  selector:
    matchLabels:
      {{- include "invy.selectorLabels" . | nindent 6 }}
      app.kubernetes.io/component: database
  template:
    metadata:
      labels:
        {{- include "invy.selectorLabels" . | nindent 8 }}
        app.kubernetes.io/component: database
    spec:
      securityContext:
        fsGroup: 999
      initContainers:
        # The data directory (e.g. on NFS) may be root-owned on first use;
        # hand it to the postgres UID/GID (999) before the non-root main
        # container starts. Failures are tolerated ("|| true") so a volume
        # that forbids chown does not block startup.
        - name: fix-permissions
          image: busybox:latest
          command:
            - sh
            - -c
            - |
              chown -R 999:999 /var/lib/postgresql/data || true
              chmod 700 /var/lib/postgresql/data || true
          volumeMounts:
            - name: postgres-data
              mountPath: /var/lib/postgresql/data
          securityContext:
            runAsUser: 0
      containers:
        - name: postgres
          securityContext:
            runAsUser: 999
            runAsNonRoot: true
          image: "{{ .Values.postgres.image.repository }}:{{ .Values.postgres.image.tag }}"
          imagePullPolicy: {{ .Values.postgres.image.pullPolicy }}
          ports:
            - name: postgres
              containerPort: {{ .Values.postgres.port }}
              protocol: TCP
          env:
            - name: POSTGRES_USER
              valueFrom:
                secretKeyRef:
                  name: {{ include "invy.fullname" . }}-secrets
                  key: postgres-user
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ include "invy.fullname" . }}-secrets
                  key: postgres-password
            - name: POSTGRES_DB
              valueFrom:
                secretKeyRef:
                  name: {{ include "invy.fullname" . }}-secrets
                  key: postgres-database
            # Keep the cluster in a subdirectory so volume artifacts such as
            # lost+found do not make initdb refuse a "non-empty" directory.
            - name: PGDATA
              value: /var/lib/postgresql/data/pgdata
          volumeMounts:
            - name: postgres-data
              mountPath: /var/lib/postgresql/data
            - name: postgres-run
              mountPath: /var/run/postgresql
            - name: init-script
              mountPath: /docker-entrypoint-initdb.d
          resources:
            {{- toYaml .Values.postgres.resources | nindent 12 }}
          # Health probes aligned with tasko-chart approach.
          # Use TCP localhost instead of Unix socket to avoid permission issues.
          # Shell form so $POSTGRES_USER / $POSTGRES_DB expand at runtime.
          # FIX: the probe port was hard-coded to 5432; it now follows
          # .Values.postgres.port, matching the templated containerPort.
          startupProbe:
            exec:
              command:
                - sh
                - -c
                - pg_isready -h 127.0.0.1 -p {{ .Values.postgres.port }} -U "$POSTGRES_USER" -d "$POSTGRES_DB"
            initialDelaySeconds: 10
            periodSeconds: 5
            timeoutSeconds: 5
            failureThreshold: 30  # Allow up to 150s for slow NFS startup
          livenessProbe:
            exec:
              command:
                - sh
                - -c
                - pg_isready -h 127.0.0.1 -p {{ .Values.postgres.port }} -U "$POSTGRES_USER" -d "$POSTGRES_DB"
            initialDelaySeconds: 30
            periodSeconds: 10
            timeoutSeconds: 5
            failureThreshold: 3
          readinessProbe:
            exec:
              command:
                - sh
                - -c
                - pg_isready -h 127.0.0.1 -p {{ .Values.postgres.port }} -U "$POSTGRES_USER" -d "$POSTGRES_DB"
            initialDelaySeconds: 5
            periodSeconds: 5
            timeoutSeconds: 3
            failureThreshold: 3
      volumes:
        - name: init-script
          configMap:
            name: {{ include "invy.fullname" . }}-db-schema
        - name: postgres-run
          emptyDir: {}
        # Restructured for clarity: the non-persistent fallback is emitted
        # explicitly inside `volumes` rather than via an `else` branch placed
        # after volumeClaimTemplates, which depended on exact indentation to
        # land in the right list.
        {{- if not .Values.postgres.persistence.enabled }}
        - name: postgres-data
          emptyDir: {}
        {{- end }}
  {{- if .Values.postgres.persistence.enabled }}
  volumeClaimTemplates:
    - metadata:
        name: postgres-data
      spec:
        accessModes:
          - {{ .Values.postgres.persistence.accessMode }}
        {{- if .Values.postgres.persistence.storageClass }}
        storageClassName: {{ .Values.postgres.persistence.storageClass }}
        {{- end }}
        resources:
          requests:
            storage: {{ .Values.postgres.persistence.size }}
  {{- end }}

View File

@ -0,0 +1,77 @@
# Frontend Deployment: serves the built SPA; admin credentials and any extra
# env vars are injected as container environment.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "invy.fullname" . }}-frontend
  labels:
    {{- include "invy.labels" . | nindent 4 }}
    app.kubernetes.io/component: frontend
spec:
  replicas: {{ .Values.frontend.replicaCount }}
  selector:
    matchLabels:
      {{- include "invy.selectorLabels" . | nindent 6 }}
      app.kubernetes.io/component: frontend
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "invy.selectorLabels" . | nindent 8 }}
        app.kubernetes.io/component: frontend
    spec:
      {{- with .Values.global.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "invy.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: frontend
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.frontend.image.repository }}:{{ .Values.frontend.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.frontend.image.pullPolicy }}
          ports:
            - name: http
              containerPort: {{ .Values.frontend.service.targetPort }}
              protocol: TCP
          # FIX: the admin credential vars were previously nested inside
          # `{{`{{- if .Values.frontend.env }}`}}`, so an empty env map silently
          # dropped VITE_ADMIN_USERNAME / VITE_ADMIN_PASSWORD. They are now
          # emitted unconditionally.
          env:
            {{- range $key, $value := .Values.frontend.env }}
            - name: {{ $key }}
              value: {{ $value | quote }}
            {{- end }}
            - name: VITE_ADMIN_USERNAME
              value: {{ .Values.frontend.adminUsername | quote }}
            - name: VITE_ADMIN_PASSWORD
              value: {{ .Values.frontend.adminPassword | quote }}
          livenessProbe:
            httpGet:
              path: /
              port: http
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /
              port: http
            initialDelaySeconds: 10
            periodSeconds: 5
          resources:
            {{- toYaml .Values.frontend.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}

View File

@ -0,0 +1,42 @@
{{- if .Values.frontend.ingress.enabled -}}
# Ingress routing the frontend hostname(s) to the frontend Service.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ include "invy.fullname" . }}-frontend
  labels:
    {{- include "invy.labels" . | nindent 4 }}
    app.kubernetes.io/component: frontend
  {{- with .Values.frontend.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.frontend.ingress.className }}
  ingressClassName: {{ .Values.frontend.ingress.className }}
  {{- end }}
  {{- if .Values.frontend.ingress.tls }}
  tls:
    {{- range .Values.frontend.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.frontend.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            pathType: {{ .pathType }}
            backend:
              service:
                name: {{ include "invy.fullname" $ }}-frontend
                port:
                  number: {{ $.Values.frontend.service.port }}
          {{- end }}
    {{- end }}
{{- end }}

View File

@ -0,0 +1,17 @@
# ClusterIP Service exposing the frontend pods on the configured port.
apiVersion: v1
kind: Service
metadata:
  name: {{ include "invy.fullname" . }}-frontend
  labels:
    {{- include "invy.labels" . | nindent 4 }}
    app.kubernetes.io/component: frontend
spec:
  type: {{ .Values.frontend.service.type }}
  ports:
    - port: {{ .Values.frontend.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "invy.selectorLabels" . | nindent 4 }}
    app.kubernetes.io/component: frontend

View File

@ -0,0 +1,16 @@
# Application Secret: database credentials, the composed connection URL, and
# third-party API credentials. Values come from the chart's values file.
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "invy.fullname" . }}-secrets
  labels:
    {{- include "invy.labels" . | nindent 4 }}
type: Opaque
stringData:
  postgres-user: {{ .Values.postgres.user | quote }}
  postgres-password: {{ .Values.postgres.password | quote }}
  postgres-database: {{ .Values.postgres.database | quote }}
  # NOTE(review): user and password are interpolated verbatim into the URL;
  # characters such as '@' or ':' in the password would need URL-encoding —
  # confirm values stay URL-safe.
  database-url: "postgresql://{{ .Values.postgres.user }}:{{ .Values.postgres.password }}@{{ include "invy.fullname" . }}-db:{{ .Values.postgres.port }}/{{ .Values.postgres.database }}"
  google-client-id: {{ .Values.backend.googleClientId | quote }}
  google-client-secret: {{ .Values.backend.googleClientSecret | quote }}
  whatsapp-access-token: {{ .Values.backend.whatsappAccessToken | quote }}
  whatsapp-phone-number-id: {{ .Values.backend.whatsappPhoneNumberId | quote }}

View File

@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
# ServiceAccount for the chart's workloads; only rendered when
# serviceAccount.create is true.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "invy.serviceAccountName" . }}
  labels:
    {{- include "invy.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}

View File

@ -0,0 +1,180 @@
# Default values for the invy chart.
global:
  namespace: my-apps
  imagePullSecrets: []

# Backend configuration
backend:
  name: backend
  replicaCount: 1
  image:
    repository: harbor.dvirlabs.com/my-apps/invy-backend
    pullPolicy: IfNotPresent
    tag: "latest"
  service:
    type: ClusterIP
    port: 8000
    targetPort: 8000
  resources:
    requests:
      cpu: 100m
      memory: 128Mi
    limits:
      cpu: 500m
      memory: 512Mi
  env:
    PYTHONUNBUFFERED: "1"
    GOOGLE_REDIRECT_URI: "https://api-invy.dvirlabs.com/auth/google/callback"
    FRONTEND_URL: "https://invy.dvirlabs.com"
  # Google OAuth credentials (set these values!)
  googleClientId: "YOUR_GOOGLE_CLIENT_ID"
  googleClientSecret: "YOUR_GOOGLE_CLIENT_SECRET"
  # WhatsApp Cloud API credentials (set these values!)
  whatsappAccessToken: "YOUR_WHATSAPP_ACCESS_TOKEN"
  whatsappPhoneNumberId: "YOUR_WHATSAPP_PHONE_NUMBER_ID"
  ingress:
    enabled: true
    className: "traefik"
    annotations:
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
      traefik.ingress.kubernetes.io/router.tls: "true"
      cert-manager.io/cluster-issuer: "letsencrypt-prod"
    hosts:
      - host: api-invy.dvirlabs.com
        paths:
          - path: /
            pathType: Prefix
    tls:
      - secretName: api-invy-tls
        hosts:
          - api-invy.dvirlabs.com

# Frontend configuration
frontend:
  name: frontend
  replicaCount: 1
  image:
    repository: harbor.dvirlabs.com/my-apps/invy-frontend
    pullPolicy: IfNotPresent
    tag: "latest"
  service:
    type: ClusterIP
    port: 80
    targetPort: 80
  env:
    VITE_API_URL: "https://api-invy.dvirlabs.com"
  # FIX: the frontend Deployment references .Values.frontend.adminUsername /
  # .Values.frontend.adminPassword, which were missing here and rendered as
  # empty strings. Set real values before deploying!
  adminUsername: "YOUR_ADMIN_USERNAME"
  adminPassword: "YOUR_ADMIN_PASSWORD"
  resources:
    requests:
      cpu: 50m
      memory: 64Mi
    limits:
      cpu: 200m
      memory: 256Mi
  ingress:
    enabled: true
    className: "traefik"
    annotations:
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
      traefik.ingress.kubernetes.io/router.tls: "true"
      cert-manager.io/cluster-issuer: "letsencrypt-prod"
    hosts:
      - host: invy.dvirlabs.com
        paths:
          - path: /
            pathType: Prefix
    tls:
      - secretName: invy-tls
        hosts:
          - invy.dvirlabs.com

# PostgreSQL configuration
postgres:
  name: db
  image:
    repository: postgres
    tag: "16-alpine"
    pullPolicy: IfNotPresent
  user: invy_user
  password: invy_password
  database: invy_db
  port: 5432
  service:
    type: ClusterIP
    port: 5432
    targetPort: 5432
  persistence:
    enabled: true
    accessMode: ReadWriteOnce
    storageClass: "nfs-client"
    size: 10Gi
  resources:
    requests:
      cpu: 100m
      memory: 256Mi
    limits:
      cpu: 1000m
      memory: 1Gi

# Ingress configuration
ingress:
  enabled: false  # Individual frontend/backend ingress resources handle routing instead
  className: "traefik"
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
  hosts:
    - host: invy.dvirlabs.com
      paths:
        - path: /
          pathType: Prefix
          backend: frontend
  tls:
    - secretName: invy-tls
      hosts:
        - invy.dvirlabs.com

# Service Account
serviceAccount:
  create: true
  annotations: {}
  name: ""

# Pod annotations
podAnnotations: {}

# Pod security context
podSecurityContext: {}
  # fsGroup: 2000

# Container security context
securityContext: {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

# Node selector
nodeSelector: {}

# Tolerations
tolerations: []

# Affinity
affinity: {}

# Replica count (default for both frontend and backend if not specified)
replicaCount: 1

View File

@ -0,0 +1,6 @@
apiVersion: v2
name: ipify
description: A Helm chart for IP Subnet Calculator application
type: application
# Chart version (SemVer); bump whenever the chart templates change.
version: 1.0.0
# Version of the application this chart deploys (informational).
appVersion: "1.0.0"

View File

@ -0,0 +1,20 @@
{{/*
Expand the name of the chart, honoring .Values.nameOverride; truncated to 63
characters (Kubernetes name-length limit) with any trailing "-" removed.
*/}}
{{- define "ipify.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name. If fullnameOverride is set it wins;
otherwise the release name is combined with the chart name, unless the release
name already contains the chart name (avoids "name-name" duplication).
*/}}
{{- define "ipify.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Chart name and version label value; "+" is invalid in label values, so it is
replaced with "_".
*/}}
{{- define "ipify.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

View File

@ -0,0 +1,41 @@
# Backend Deployment for the ipify API; probed via its /health endpoint.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Values.backend.name }}
  labels:
    app: {{ .Values.backend.name }}
    component: backend
spec:
  replicas: {{ .Values.backend.replicaCount }}
  selector:
    matchLabels:
      app: {{ .Values.backend.name }}
      component: backend
  template:
    metadata:
      labels:
        app: {{ .Values.backend.name }}
        component: backend
    spec:
      containers:
      - name: backend
        image: "{{ .Values.backend.image.repository }}:{{ .Values.backend.image.tag }}"
        imagePullPolicy: {{ .Values.backend.image.pullPolicy }}
        ports:
        - containerPort: {{ .Values.backend.service.targetPort }}
          name: http
          protocol: TCP
        resources:
          {{- toYaml .Values.backend.resources | nindent 10 }}
        livenessProbe:
          httpGet:
            path: /health
            port: http
          initialDelaySeconds: 30
          periodSeconds: 10
        readinessProbe:
          httpGet:
            path: /health
            port: http
          initialDelaySeconds: 10
          periodSeconds: 5

View File

@ -0,0 +1,17 @@
# Service exposing the ipify backend pods.
apiVersion: v1
kind: Service
metadata:
  name: {{ .Values.backend.name }}
  labels:
    app: {{ .Values.backend.name }}
    component: backend
spec:
  type: {{ .Values.backend.service.type }}
  ports:
  - port: {{ .Values.backend.service.port }}
    targetPort: {{ .Values.backend.service.targetPort }}
    protocol: TCP
    name: http
  selector:
    app: {{ .Values.backend.name }}
    component: backend

View File

@ -0,0 +1,44 @@
# Frontend Deployment for the ipify UI; the backend URL is injected via
# VITE_API_URL from values.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Values.frontend.name }}
  labels:
    app: {{ .Values.frontend.name }}
    component: frontend
spec:
  replicas: {{ .Values.frontend.replicaCount }}
  selector:
    matchLabels:
      app: {{ .Values.frontend.name }}
      component: frontend
  template:
    metadata:
      labels:
        app: {{ .Values.frontend.name }}
        component: frontend
    spec:
      containers:
      - name: frontend
        image: "{{ .Values.frontend.image.repository }}:{{ .Values.frontend.image.tag }}"
        imagePullPolicy: {{ .Values.frontend.image.pullPolicy }}
        ports:
        - containerPort: {{ .Values.frontend.service.targetPort }}
          name: http
          protocol: TCP
        env:
        - name: VITE_API_URL
          value: {{ .Values.env.backendUrl | quote }}
        resources:
          {{- toYaml .Values.frontend.resources | nindent 10 }}
        livenessProbe:
          httpGet:
            path: /
            port: http
          initialDelaySeconds: 30
          periodSeconds: 10
        readinessProbe:
          httpGet:
            path: /
            port: http
          initialDelaySeconds: 10
          periodSeconds: 5

View File

@ -0,0 +1,17 @@
# Service exposing the ipify frontend pods.
apiVersion: v1
kind: Service
metadata:
  name: {{ .Values.frontend.name }}
  labels:
    app: {{ .Values.frontend.name }}
    component: frontend
spec:
  type: {{ .Values.frontend.service.type }}
  ports:
  - port: {{ .Values.frontend.service.port }}
    targetPort: {{ .Values.frontend.service.targetPort }}
    protocol: TCP
    name: http
  selector:
    app: {{ .Values.frontend.name }}
    component: frontend

View File

@ -0,0 +1,43 @@
{{- if .Values.ingress.enabled -}}
# Single Ingress routing each configured path to either the backend or the
# frontend Service, selected by the path's `backend` field in values.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ipify-ingress
  annotations:
    {{- range $key, $value := .Values.ingress.annotations }}
    {{ $key }}: {{ $value | quote }}
    {{- end }}
spec:
  ingressClassName: {{ .Values.ingress.className }}
  {{- if .Values.ingress.tls }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            pathType: {{ .pathType }}
            backend:
              service:
                {{- if eq .backend "backend" }}
                name: {{ $.Values.backend.name }}
                port:
                  number: {{ $.Values.backend.service.port }}
                {{- else }}
                name: {{ $.Values.frontend.name }}
                port:
                  number: {{ $.Values.frontend.service.port }}
                {{- end }}
          {{- end }}
    {{- end }}
{{- end }}

View File

@ -0,0 +1,66 @@
# Default values for ipify

# Backend configuration
backend:
  name: ipify-backend
  replicaCount: 1
  image:
    repository: ipify-backend
    tag: latest
    pullPolicy: IfNotPresent
  service:
    type: ClusterIP
    port: 8000
    targetPort: 8000
  resources:
    limits:
      cpu: 500m
      memory: 512Mi
    requests:
      cpu: 250m
      memory: 256Mi

# Frontend configuration
frontend:
  name: ipify-frontend
  replicaCount: 1
  image:
    repository: ipify-frontend
    tag: latest
    pullPolicy: IfNotPresent
  service:
    type: ClusterIP
    port: 80
    targetPort: 80
  resources:
    limits:
      cpu: 200m
      memory: 256Mi
    requests:
      cpu: 100m
      memory: 128Mi

# Ingress configuration
ingress:
  enabled: true
  className: nginx
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
  hosts:
    - host: ipify.example.com
      paths:
        # /api is matched first and routed to the backend; everything else
        # falls through to the frontend.
        - path: /api
          pathType: Prefix
          backend: backend
        - path: /
          pathType: Prefix
          backend: frontend
  tls:
    - secretName: ipify-tls
      hosts:
        - ipify.example.com

# Environment variables
env:
  # Backend URL for frontend to use
  backendUrl: "https://ipify.example.com/api"

View File

@ -0,0 +1,45 @@
# ConfigMap carrying the idempotent migration SQL executed by the
# post-upgrade Job of the same name.
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Release.Name }}-add-missing-tables
  namespace: {{ .Values.global.namespace }}
data:
  add-tables.sql: |
    -- Create grocery lists table
    CREATE TABLE IF NOT EXISTS grocery_lists (
      id SERIAL PRIMARY KEY,
      name TEXT NOT NULL,
      items TEXT[] NOT NULL DEFAULT '{}',
      owner_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
      is_pinned BOOLEAN DEFAULT FALSE,
      created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
      updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );

    -- Create grocery list shares table
    CREATE TABLE IF NOT EXISTS grocery_list_shares (
      id SERIAL PRIMARY KEY,
      list_id INTEGER NOT NULL REFERENCES grocery_lists(id) ON DELETE CASCADE,
      shared_with_user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
      can_edit BOOLEAN DEFAULT FALSE,
      shared_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
      UNIQUE(list_id, shared_with_user_id)
    );

    CREATE INDEX IF NOT EXISTS idx_grocery_lists_owner_id ON grocery_lists (owner_id);
    CREATE INDEX IF NOT EXISTS idx_grocery_list_shares_list_id ON grocery_list_shares (list_id);
    CREATE INDEX IF NOT EXISTS idx_grocery_list_shares_user_id ON grocery_list_shares (shared_with_user_id);

    -- Create notifications table
    CREATE TABLE IF NOT EXISTS notifications (
      id SERIAL PRIMARY KEY,
      user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
      type TEXT NOT NULL,
      message TEXT NOT NULL,
      related_id INTEGER,
      is_read BOOLEAN DEFAULT FALSE,
      created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );

    CREATE INDEX IF NOT EXISTS idx_notifications_user_id ON notifications (user_id);
    CREATE INDEX IF NOT EXISTS idx_notifications_is_read ON notifications (is_read);

View File

@ -0,0 +1,49 @@
# Post-upgrade Job that applies the migration SQL from the matching ConfigMap.
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ .Release.Name }}-add-missing-tables
  namespace: {{ .Values.global.namespace }}
  annotations:
    # Run after every `helm upgrade`; the previous run is deleted first so
    # the fixed Job name can be reused.
    "helm.sh/hook": post-upgrade
    "helm.sh/hook-weight": "6"
    "helm.sh/hook-delete-policy": before-hook-creation
spec:
  template:
    spec:
      restartPolicy: Never
      containers:
        - name: add-tables
          image: postgres:16-alpine
          # psql/pg_isready pick up connection parameters from the standard
          # PG* environment variables.
          env:
            - name: PGHOST
              value: {{ .Release.Name }}-db
            - name: PGPORT
              value: "{{ .Values.postgres.port }}"
            - name: PGDATABASE
              value: {{ .Values.postgres.database }}
            - name: PGUSER
              value: {{ .Values.postgres.user }}
            - name: PGPASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ .Release.Name }}-db-credentials
                  key: DB_PASSWORD
          command:
            - sh
            - -c
            - |
              echo "Waiting for database to be ready..."
              # FIX: quote the variable expansions so values containing
              # whitespace or glob characters cannot be word-split.
              until pg_isready -h "$PGHOST" -p "$PGPORT" -U "$PGUSER"; do
                echo "Database not ready, waiting..."
                sleep 2
              done
              echo "Database ready, adding missing tables..."
              psql -v ON_ERROR_STOP=1 -f /sql/add-tables.sql
              echo "Tables added successfully!"
          volumeMounts:
            - name: sql
              mountPath: /sql
      volumes:
        - name: sql
          configMap:
            name: {{ .Release.Name }}-add-missing-tables

View File

@ -0,0 +1,33 @@
# Application Secret aggregating OAuth, SMTP, frontend-URL and R2 backup
# settings; consumed by the backend via envFrom.
apiVersion: v1
kind: Secret
metadata:
  name: {{ .Release.Name }}-app-secrets
  namespace: {{ .Values.global.namespace }}
type: Opaque
stringData:
  # Google OAuth
  GOOGLE_CLIENT_ID: {{ .Values.oauth.google.clientId | quote }}
  GOOGLE_CLIENT_SECRET: {{ .Values.oauth.google.clientSecret | quote }}
  GOOGLE_REDIRECT_URI: {{ .Values.oauth.google.redirectUri | quote }}
  # Microsoft Entra ID (Azure AD) OAuth
  AZURE_CLIENT_ID: {{ .Values.oauth.azure.clientId | quote }}
  AZURE_CLIENT_SECRET: {{ .Values.oauth.azure.clientSecret | quote }}
  AZURE_TENANT_ID: {{ .Values.oauth.azure.tenantId | quote }}
  AZURE_REDIRECT_URI: {{ .Values.oauth.azure.redirectUri | quote }}
  # Email Configuration
  SMTP_HOST: {{ .Values.email.smtpHost | quote }}
  SMTP_PORT: {{ .Values.email.smtpPort | quote }}
  SMTP_USER: {{ .Values.email.smtpUser | quote }}
  SMTP_PASSWORD: {{ .Values.email.smtpPassword | quote }}
  SMTP_FROM: {{ .Values.email.smtpFrom | quote }}
  # Frontend URL for redirects
  FRONTEND_URL: {{ .Values.frontend.externalUrl | quote }}
  # R2 Backup Configuration
  R2_ENDPOINT: {{ .Values.r2.endpoint | quote }}
  R2_ACCESS_KEY: {{ .Values.r2.accessKey | quote }}
  R2_SECRET_KEY: {{ .Values.r2.secretKey | quote }}
  BACKUP_INTERVAL: {{ .Values.r2.backupInterval | quote }}

View File

@ -79,6 +79,8 @@ spec:
envFrom:
- secretRef:
name: {{ .Release.Name }}-db-credentials
- secretRef:
name: {{ .Release.Name }}-app-secrets
startupProbe:
httpGet:
path: /docs

View File

@ -30,6 +30,17 @@ data:
END IF;
END $$;
-- Add auth_provider column to users if it doesn't exist
DO $$
BEGIN
IF NOT EXISTS (
SELECT 1 FROM information_schema.columns
WHERE table_name = 'users' AND column_name = 'auth_provider'
) THEN
ALTER TABLE users ADD COLUMN auth_provider TEXT DEFAULT 'local';
END IF;
END $$;
-- Verify recipes schema
SELECT column_name, data_type
FROM information_schema.columns

View File

@ -14,6 +14,8 @@ data:
first_name TEXT,
last_name TEXT,
display_name TEXT NOT NULL,
is_admin BOOLEAN DEFAULT FALSE,
auth_provider TEXT DEFAULT 'local',
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
@ -81,3 +83,52 @@ data:
ALTER TABLE users ALTER COLUMN display_name SET NOT NULL;
END IF;
END $$;
DO $$
BEGIN
IF NOT EXISTS (
SELECT 1 FROM information_schema.columns
WHERE table_name = 'users' AND column_name = 'is_admin'
) THEN
ALTER TABLE users ADD COLUMN is_admin BOOLEAN DEFAULT FALSE;
END IF;
END $$;
-- Create grocery lists table
CREATE TABLE IF NOT EXISTS grocery_lists (
id SERIAL PRIMARY KEY,
name TEXT NOT NULL,
items TEXT[] NOT NULL DEFAULT '{}',
owner_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
is_pinned BOOLEAN DEFAULT FALSE,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
-- Create grocery list shares table
CREATE TABLE IF NOT EXISTS grocery_list_shares (
id SERIAL PRIMARY KEY,
list_id INTEGER NOT NULL REFERENCES grocery_lists(id) ON DELETE CASCADE,
shared_with_user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
can_edit BOOLEAN DEFAULT FALSE,
shared_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
UNIQUE(list_id, shared_with_user_id)
);
CREATE INDEX IF NOT EXISTS idx_grocery_lists_owner_id ON grocery_lists (owner_id);
CREATE INDEX IF NOT EXISTS idx_grocery_list_shares_list_id ON grocery_list_shares (list_id);
CREATE INDEX IF NOT EXISTS idx_grocery_list_shares_user_id ON grocery_list_shares (shared_with_user_id);
-- Create notifications table
CREATE TABLE IF NOT EXISTS notifications (
id SERIAL PRIMARY KEY,
user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
type TEXT NOT NULL,
message TEXT NOT NULL,
related_id INTEGER,
is_read BOOLEAN DEFAULT FALSE,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX IF NOT EXISTS idx_notifications_user_id ON notifications (user_id);
CREATE INDEX IF NOT EXISTS idx_notifications_is_read ON notifications (is_read);

View File

@ -26,6 +26,9 @@ backend:
env:
PYTHONUNBUFFERED: "1"
# Secrets are created in db-secret.yaml
# These are passed via envFrom secretRef
ingress:
enabled: true
@ -85,6 +88,7 @@ frontend:
- secretName: my-recipes-tls
hosts:
- my-recipes.dvirlabs.com
externalUrl: "https://my-recipes.dvirlabs.com"
# PostgreSQL configuration
postgres:
@ -118,6 +122,34 @@ postgres:
cpu: 1000m
memory: 1Gi
# OAuth Configuration
oauth:
google:
clientId: "143092846986-hsi59m0on2c9rb5qrdoejfceieao2ioc.apps.googleusercontent.com"
clientSecret: "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"
redirectUri: "https://api-my-recipes.dvirlabs.com/auth/google/callback"
azure:
clientId: "db244cf5-eb11-4738-a2ea-5b0716c9ec0a"
clientSecret: "Zad8Q~qRBxaQq8up0lLXAq4pHzrVM2JFGFJhHaDp"
tenantId: "consumers"
redirectUri: "https://api-my-recipes.dvirlabs.com/auth/azure/callback"
# Email Configuration
email:
smtpHost: "smtp.gmail.com"
smtpPort: "587"
smtpUser: "dvirlabs@gmail.com"
smtpPassword: "agaanrhbbazbdytv"
smtpFrom: "dvirlabs@gmail.com"
# R2 Backup Configuration
r2:
endpoint: "https://d4704b8c40b2f95b2c7bf7ee4ecc52f8.r2.cloudflarestorage.com"
accessKey: "" # Set this in my-recipes/values.yaml
secretKey: "" # Set this in my-recipes/values.yaml
backupInterval: "weekly" # Options: test (1 min), daily, weekly
# Ingress configuration
ingress:
enabled: false # Individual frontend/backend ingress resources handle routing instead

View File

@ -26,4 +26,15 @@ spec:
- name: MINIO_ENDPOINT
value: "{{ .Values.backend.env.MINIO_ENDPOINT }}"
- name: MINIO_BUCKET
value: "{{ .Values.backend.env.MINIO_BUCKET }}"
value: "{{ .Values.backend.env.MINIO_BUCKET }}"
{{- if .Values.backend.persistence.enabled }}
volumeMounts:
- name: data
mountPath: /data
{{- end }}
{{- if .Values.backend.persistence.enabled }}
volumes:
- name: data
persistentVolumeClaim:
claimName: navix-backend-data
{{- end }}

View File

@ -0,0 +1,15 @@
{{- if .Values.backend.persistence.enabled }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: navix-backend-data
spec:
accessModes:
- {{ .Values.backend.persistence.accessMode }}
resources:
requests:
storage: {{ .Values.backend.persistence.size }}
{{- if .Values.backend.persistence.storageClass }}
storageClassName: {{ .Values.backend.persistence.storageClass }}
{{- end }}
{{- end }}

View File

@ -35,6 +35,11 @@ backend:
MINIO_SECRET_KEY: "your-secret-key"
MINIO_ENDPOINT: "s3.dvirlabs.com"
MINIO_BUCKET: "navix-icons"
persistence:
enabled: true
accessMode: ReadWriteOnce
size: 1Gi
storageClass: ""
ingress:
enabled: true
className: traefik

View File

@ -0,0 +1,12 @@
apiVersion: v2
name: tasko
description: A Helm chart for Tasko - Modern Task Management Application
type: application
version: 1.0.0
appVersion: "1.0.0"
keywords:
- tasko
- task-management
- productivity
maintainers:
- name: dvir

View File

@ -0,0 +1,228 @@
# Tasko Helm Chart
Kubernetes Helm chart for deploying Tasko task management application.
## Architecture
- **Frontend**: React application served by Nginx
- URL: https://tasko.dvirlabs.com
- Port: 80
- **Backend**: FastAPI Python application
- URL: https://api-tasko.dvirlabs.com
- Port: 8000
- **Database**: PostgreSQL
- Managed by Helm subchart
## Prerequisites
- Kubernetes cluster (1.19+)
- Helm 3.0+
- kubectl configured
- Nginx Ingress Controller installed
- cert-manager installed (for automatic TLS certificates)
## Building Docker Images
### Frontend
```bash
cd frontend
docker build -t tasko-frontend:latest .
docker tag tasko-frontend:latest <your-registry>/tasko-frontend:latest
docker push <your-registry>/tasko-frontend:latest
```
### Backend
```bash
cd backend
docker build -t tasko-backend:latest .
docker tag tasko-backend:latest <your-registry>/tasko-backend:latest
docker push <your-registry>/tasko-backend:latest
```
## Installation
### Quick Install with Default Values
```bash
helm install tasko ./helm/tasko
```
### Install with Custom Values
```bash
helm install tasko ./helm/tasko -f custom-values.yaml
```
### Install with Custom Image Registry
```bash
helm install tasko ./helm/tasko \
--set frontend.image.repository=<your-registry>/tasko-frontend \
--set backend.image.repository=<your-registry>/tasko-backend
```
## Configuration
### Key Configuration Options
| Parameter | Description | Default |
|-----------|-------------|---------|
| `frontend.image.repository` | Frontend image repository | `tasko-frontend` |
| `frontend.image.tag` | Frontend image tag | `latest` |
| `backend.image.repository` | Backend image repository | `tasko-backend` |
| `backend.image.tag` | Backend image tag | `latest` |
| `frontend.ingress.hosts[0].host` | Frontend hostname | `tasko.dvirlabs.com` |
| `backend.ingress.hosts[0].host` | Backend hostname | `api-tasko.dvirlabs.com` |
| `postgresql.enabled` | Enable PostgreSQL subchart | `true` |
| `postgresql.auth.username` | PostgreSQL username | `tasko_user` |
| `postgresql.auth.password` | PostgreSQL password | `tasko_password` |
| `postgresql.auth.database` | PostgreSQL database name | `tasko_db` |
### Custom Values Example
Create a `custom-values.yaml` file:
```yaml
frontend:
image:
repository: myregistry.io/tasko-frontend
tag: "1.0.0"
ingress:
hosts:
- host: tasko.mydomain.com
paths:
- path: /
pathType: Prefix
tls:
- secretName: tasko-frontend-tls
hosts:
- tasko.mydomain.com
backend:
image:
repository: myregistry.io/tasko-backend
tag: "1.0.0"
ingress:
hosts:
- host: api-tasko.mydomain.com
paths:
- path: /
pathType: Prefix
tls:
- secretName: tasko-backend-tls
hosts:
- api-tasko.mydomain.com
postgresql:
auth:
password: "your-secure-password"
```
## Upgrading
```bash
helm upgrade tasko ./helm/tasko -f custom-values.yaml
```
## Uninstalling
```bash
helm uninstall tasko
```
## DNS Configuration
Make sure to configure your DNS to point to your Kubernetes cluster's ingress:
```
tasko.dvirlabs.com A/CNAME <your-ingress-ip-or-hostname>
api-tasko.dvirlabs.com A/CNAME <your-ingress-ip-or-hostname>
```
## TLS Certificates
The chart is configured to use cert-manager with Let's Encrypt for automatic TLS certificate provisioning. Make sure you have:
1. cert-manager installed in your cluster
2. A ClusterIssuer named `letsencrypt-prod` configured
Example ClusterIssuer:
```yaml
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: your-email@example.com
privateKeySecretRef:
name: letsencrypt-prod
solvers:
- http01:
ingress:
class: nginx
```
## Troubleshooting
### Check Pod Status
```bash
kubectl get pods -l app.kubernetes.io/name=tasko
```
### View Logs
```bash
# Frontend logs
kubectl logs -l app.kubernetes.io/component=frontend
# Backend logs
kubectl logs -l app.kubernetes.io/component=backend
# PostgreSQL logs
kubectl logs -l app.kubernetes.io/name=postgresql
```
### Check Ingress
```bash
kubectl get ingress
kubectl describe ingress tasko-frontend
kubectl describe ingress tasko-backend
```
### Database Connection Issues
```bash
# Check if PostgreSQL is running
kubectl get pods -l app.kubernetes.io/name=postgresql
# Test database connection from backend pod
kubectl exec -it <backend-pod-name> -- psql $DATABASE_URL -c "SELECT 1"
```
## CORS Configuration
The backend ingress is pre-configured with CORS headers to allow requests from the frontend domain. The configuration includes:
- `nginx.ingress.kubernetes.io/cors-allow-origin: "https://tasko.dvirlabs.com"`
- `nginx.ingress.kubernetes.io/enable-cors: "true"`
If you change the frontend domain, update the CORS configuration in `values.yaml`.
## Production Considerations
1. **Secrets Management**: Consider using external secret management (e.g., Sealed Secrets, External Secrets Operator)
2. **Database Backups**: Set up regular PostgreSQL backups
3. **Monitoring**: Add Prometheus/Grafana for monitoring
4. **Scaling**: Adjust `replicaCount` for horizontal scaling
5. **Resource Limits**: Tune resource requests/limits based on your workload
6. **Image Security**: Scan Docker images for vulnerabilities
7. **Network Policies**: Implement network policies for additional security
## Support
For issues and questions, please refer to the project repository.

View File

@ -0,0 +1,22 @@
1. Get the frontend application URL by running these commands:
{{- if .Values.frontend.ingress.enabled }}
  https://{{ (index .Values.frontend.ingress.hosts 0).host }}
{{- else if contains "NodePort" .Values.frontend.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "tasko.fullname" . }}-frontend)
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.frontend.service.type }}
  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
  You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "tasko.fullname" . }}-frontend'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "tasko.fullname" . }}-frontend --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
  echo http://$SERVICE_IP:{{ .Values.frontend.service.port }}
{{- else if contains "ClusterIP" .Values.frontend.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "tasko.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=frontend" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
2. Get the backend API URL by running these commands:
{{- if .Values.backend.ingress.enabled }}
  https://{{ (index .Values.backend.ingress.hosts 0).host }}
{{- end }}

View File

@ -0,0 +1,60 @@
{{/*
Expand the name of the chart.
Truncated to 63 characters because Kubernetes name fields are limited
by the DNS naming spec.
*/}}
{{- define "tasko.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name (release-chart, collapsed to
just the release name when it already contains the chart name).
*/}}
{{- define "tasko.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "tasko.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "tasko.labels" -}}
helm.sh/chart: {{ include "tasko.chart" . }}
{{ include "tasko.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels. These feed Deployment/StatefulSet selectors, which are
immutable after creation — keep them stable.
*/}}
{{- define "tasko.selectorLabels" -}}
app.kubernetes.io/name: {{ include "tasko.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use.
The `default dict` guard keeps rendering from failing with a nil-pointer
error when values.yaml omits the serviceAccount block entirely.
*/}}
{{- define "tasko.serviceAccountName" -}}
{{- $sa := .Values.serviceAccount | default dict }}
{{- if $sa.create }}
{{- default (include "tasko.fullname" .) $sa.name }}
{{- else }}
{{- default "default" $sa.name }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,80 @@
# Backend Deployment: FastAPI-style API server fronted by the -backend Service.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "tasko.fullname" . }}-backend
  labels:
    {{- include "tasko.labels" . | nindent 4 }}
    app.kubernetes.io/component: backend
spec:
  # Prefer the per-component replica count (backend.replicaCount, which is
  # what values.yaml actually defines); fall back to the chart-wide value,
  # then to 1 so the field never renders empty. Note sprig `default` treats
  # 0 as unset.
  replicas: {{ .Values.backend.replicaCount | default .Values.replicaCount | default 1 }}
  selector:
    matchLabels:
      {{- include "tasko.selectorLabels" . | nindent 6 }}
      app.kubernetes.io/component: backend
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "tasko.selectorLabels" . | nindent 8 }}
        app.kubernetes.io/component: backend
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "tasko.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      # Block startup until PostgreSQL accepts TCP connections on the headless
      # service, so the app does not crash-loop on first install.
      initContainers:
        - name: wait-for-postgres
          image: busybox:1.35
          command: ['sh', '-c', 'until nc -z {{ include "tasko.fullname" . }}-db-headless {{ .Values.postgres.port | default 5432 }}; do echo waiting for postgres; sleep 2; done;']
      containers:
        - name: backend
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.backend.image.repository }}:{{ .Values.backend.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.backend.image.pullPolicy }}
          ports:
            - name: http
              containerPort: {{ .Values.backend.service.targetPort }}
              protocol: TCP
          env:
            # Connection string comes from the chart-rendered Secret.
            - name: DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: {{ include "tasko.fullname" . }}-secrets
                  key: database-url
            {{- range $key, $value := .Values.backend.env }}
            - name: {{ $key }}
              value: {{ $value | quote }}
            {{- end }}
          # NOTE(review): probes hit "/" — confirm the API returns 2xx there.
          livenessProbe:
            httpGet:
              path: /
              port: http
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /
              port: http
            initialDelaySeconds: 10
            periodSeconds: 5
          resources:
            {{- toYaml .Values.backend.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}

View File

@ -0,0 +1,42 @@
{{/*
Ingress for the backend API. Rendered only when backend.ingress.enabled;
hosts/paths/tls come straight from values. `$` is used inside the range
loops to reach the root context.
*/}}
{{- if .Values.backend.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ include "tasko.fullname" . }}-backend
  labels:
    {{- include "tasko.labels" . | nindent 4 }}
    app.kubernetes.io/component: backend
  {{- with .Values.backend.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.backend.ingress.className }}
  ingressClassName: {{ .Values.backend.ingress.className }}
  {{- end }}
  {{- if .Values.backend.ingress.tls }}
  tls:
    {{- range .Values.backend.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.backend.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            pathType: {{ .pathType }}
            backend:
              service:
                name: {{ include "tasko.fullname" $ }}-backend
                port:
                  number: {{ $.Values.backend.service.port }}
          {{- end }}
    {{- end }}
{{- end }}

View File

@ -0,0 +1,17 @@
# ClusterIP Service for the backend API. The named targetPort "http" maps to
# the container port declared in the backend Deployment.
apiVersion: v1
kind: Service
metadata:
  name: {{ include "tasko.fullname" . }}-backend
  labels:
    {{- include "tasko.labels" . | nindent 4 }}
    app.kubernetes.io/component: backend
spec:
  type: {{ .Values.backend.service.type }}
  ports:
    - port: {{ .Values.backend.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "tasko.selectorLabels" . | nindent 4 }}
    app.kubernetes.io/component: backend

View File

@ -0,0 +1,95 @@
{{/*
Schema ConfigMap for the bundled PostgreSQL StatefulSet, which mounts it at
/docker-entrypoint-initdb.d. The SQL is idempotent (IF NOT EXISTS / DO block
migration), but postgres only executes initdb.d scripts on an empty data
directory — it will not re-run on an existing volume.
*/}}
{{- if .Values.postgres }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "tasko.fullname" . }}-db-schema
  labels:
    {{- include "tasko.labels" . | nindent 4 }}
    app.kubernetes.io/component: database
data:
  schema.sql: |
    -- Create users table
    CREATE TABLE IF NOT EXISTS users (
        id SERIAL PRIMARY KEY,
        username TEXT UNIQUE NOT NULL,
        email TEXT UNIQUE NOT NULL,
        password_hash TEXT NOT NULL,
        first_name TEXT,
        last_name TEXT,
        display_name TEXT UNIQUE NOT NULL,
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    CREATE INDEX IF NOT EXISTS idx_users_username ON users (username);
    CREATE INDEX IF NOT EXISTS idx_users_email ON users (email);
    CREATE INDEX IF NOT EXISTS idx_users_display_name ON users (display_name);
    -- Create tasks table
    CREATE TABLE IF NOT EXISTS tasks (
        id SERIAL PRIMARY KEY,
        title TEXT NOT NULL,
        description TEXT,
        status TEXT NOT NULL DEFAULT 'pending', -- pending / in_progress / completed / cancelled
        priority TEXT DEFAULT 'medium', -- low / medium / high / urgent
        due_date TIMESTAMP,
        user_id INTEGER REFERENCES users(id) ON DELETE CASCADE,
        assigned_to INTEGER REFERENCES users(id) ON DELETE SET NULL,
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        completed_at TIMESTAMP
    );
    -- Indexes for tasks
    CREATE INDEX IF NOT EXISTS idx_tasks_status ON tasks (status);
    CREATE INDEX IF NOT EXISTS idx_tasks_priority ON tasks (priority);
    CREATE INDEX IF NOT EXISTS idx_tasks_user_id ON tasks (user_id);
    CREATE INDEX IF NOT EXISTS idx_tasks_assigned_to ON tasks (assigned_to);
    CREATE INDEX IF NOT EXISTS idx_tasks_due_date ON tasks (due_date);
    -- Create tags table
    CREATE TABLE IF NOT EXISTS tags (
        id SERIAL PRIMARY KEY,
        name TEXT UNIQUE NOT NULL,
        color TEXT,
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    -- Create task_tags junction table
    CREATE TABLE IF NOT EXISTS task_tags (
        task_id INTEGER REFERENCES tasks(id) ON DELETE CASCADE,
        tag_id INTEGER REFERENCES tags(id) ON DELETE CASCADE,
        PRIMARY KEY (task_id, tag_id)
    );
    CREATE INDEX IF NOT EXISTS idx_task_tags_task_id ON task_tags (task_id);
    CREATE INDEX IF NOT EXISTS idx_task_tags_tag_id ON task_tags (tag_id);
    -- Add display_name column if it doesn't exist (migration support)
    DO $$
    BEGIN
        IF NOT EXISTS (
            SELECT 1 FROM information_schema.columns
            WHERE table_name = 'users' AND column_name = 'display_name'
        ) THEN
            ALTER TABLE users ADD COLUMN display_name TEXT;
            -- Set display_name to username for existing users
            UPDATE users SET display_name = username WHERE display_name IS NULL;
            ALTER TABLE users ALTER COLUMN display_name SET NOT NULL;
            ALTER TABLE users ADD CONSTRAINT users_display_name_key UNIQUE (display_name);
        END IF;
    END $$;
    -- Verify schema
    SELECT 'Users table:' as info;
    SELECT column_name, data_type, is_nullable
    FROM information_schema.columns
    WHERE table_name = 'users'
    ORDER BY ordinal_position;
    SELECT 'Tasks table:' as info;
    SELECT column_name, data_type, is_nullable
    FROM information_schema.columns
    WHERE table_name = 'tasks'
    ORDER BY ordinal_position;
{{- end }}

View File

@ -0,0 +1,38 @@
{{/*
Two Services for the bundled PostgreSQL StatefulSet:
  - <fullname>-db-headless: clusterIP None; used as the StatefulSet's
    serviceName and by the DATABASE_URL / init-container host.
  - <fullname>-db: regular Service for general client access.
*/}}
{{- if .Values.postgres }}
---
apiVersion: v1
kind: Service
metadata:
  name: {{ include "tasko.fullname" . }}-db-headless
  labels:
    {{- include "tasko.labels" . | nindent 4 }}
    app.kubernetes.io/component: database
spec:
  clusterIP: None
  selector:
    {{- include "tasko.selectorLabels" . | nindent 4 }}
    app.kubernetes.io/component: database
  ports:
    - name: postgres
      port: {{ .Values.postgres.service.port }}
      targetPort: {{ .Values.postgres.service.targetPort }}
      protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
  name: {{ include "tasko.fullname" . }}-db
  labels:
    {{- include "tasko.labels" . | nindent 4 }}
    app.kubernetes.io/component: database
spec:
  type: {{ .Values.postgres.service.type }}
  selector:
    {{- include "tasko.selectorLabels" . | nindent 4 }}
    app.kubernetes.io/component: database
  ports:
    - name: postgres
      port: {{ .Values.postgres.service.port }}
      targetPort: {{ .Values.postgres.service.targetPort }}
      protocol: TCP
{{- end }}

View File

@ -0,0 +1,87 @@
{{/*
Single-replica PostgreSQL StatefulSet backing the app, with persistent
storage via a volumeClaimTemplate and schema bootstrap from the
-db-schema ConfigMap.
*/}}
{{- if .Values.postgres }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ include "tasko.fullname" . }}-db
  labels:
    {{- include "tasko.labels" . | nindent 4 }}
    app.kubernetes.io/component: database
spec:
  serviceName: {{ include "tasko.fullname" . }}-db-headless
  replicas: 1
  selector:
    matchLabels:
      {{- include "tasko.selectorLabels" . | nindent 6 }}
      app.kubernetes.io/component: database
  template:
    metadata:
      labels:
        {{- include "tasko.selectorLabels" . | nindent 8 }}
        app.kubernetes.io/component: database
    spec:
      containers:
        - name: postgres
          image: "{{ .Values.postgres.image.repository }}:{{ .Values.postgres.image.tag }}"
          imagePullPolicy: {{ .Values.postgres.image.pullPolicy }}
          ports:
            - containerPort: {{ .Values.postgres.port }}
              name: postgres
              protocol: TCP
          env:
            - name: POSTGRES_USER
              value: {{ .Values.postgres.user | quote }}
            # NOTE(review): password is injected as a plain env value straight
            # from values.yaml; prefer a secretKeyRef so it is not visible in
            # the rendered manifest / `kubectl describe`.
            - name: POSTGRES_PASSWORD
              value: {{ .Values.postgres.password | quote }}
            - name: POSTGRES_DB
              value: {{ .Values.postgres.database | quote }}
            # Point the data directory at a subdirectory of the mounted volume.
            - name: PGDATA
              value: /var/lib/postgresql/data/pgdata
          volumeMounts:
            - name: data
              mountPath: /var/lib/postgresql/data
            # initdb.d scripts run only when the data directory is empty
            # (first boot of a fresh volume).
            - name: init-sql
              mountPath: /docker-entrypoint-initdb.d
          livenessProbe:
            exec:
              command:
                - /bin/sh
                - -c
                - pg_isready -U {{ .Values.postgres.user }}
            initialDelaySeconds: 30
            periodSeconds: 10
            timeoutSeconds: 5
            failureThreshold: 3
          readinessProbe:
            exec:
              command:
                - /bin/sh
                - -c
                - pg_isready -U {{ .Values.postgres.user }}
            initialDelaySeconds: 10
            periodSeconds: 5
            timeoutSeconds: 3
            failureThreshold: 2
          resources:
            requests:
              cpu: {{ .Values.postgres.resources.requests.cpu }}
              memory: {{ .Values.postgres.resources.requests.memory }}
            limits:
              cpu: {{ .Values.postgres.resources.limits.cpu }}
              memory: {{ .Values.postgres.resources.limits.memory }}
      volumes:
        - name: init-sql
          configMap:
            name: {{ include "tasko.fullname" . }}-db-schema
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes:
          - {{ .Values.postgres.persistence.accessMode }}
        resources:
          requests:
            storage: {{ .Values.postgres.persistence.size }}
        {{- if .Values.postgres.persistence.storageClass }}
        storageClassName: {{ .Values.postgres.persistence.storageClass | quote }}
        {{- end }}
{{- end }}

View File

@ -0,0 +1,73 @@
# Frontend Deployment: static web frontend served behind the -frontend Service.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "tasko.fullname" . }}-frontend
  labels:
    {{- include "tasko.labels" . | nindent 4 }}
    app.kubernetes.io/component: frontend
spec:
  # Prefer the per-component replica count (frontend.replicaCount, which is
  # what values.yaml actually defines); fall back to the chart-wide value,
  # then to 1 so the field never renders empty. Note sprig `default` treats
  # 0 as unset.
  replicas: {{ .Values.frontend.replicaCount | default .Values.replicaCount | default 1 }}
  selector:
    matchLabels:
      {{- include "tasko.selectorLabels" . | nindent 6 }}
      app.kubernetes.io/component: frontend
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "tasko.selectorLabels" . | nindent 8 }}
        app.kubernetes.io/component: frontend
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "tasko.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: frontend
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.frontend.image.repository }}:{{ .Values.frontend.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.frontend.image.pullPolicy }}
          ports:
            - name: http
              containerPort: {{ .Values.frontend.service.targetPort }}
              protocol: TCP
          {{- if .Values.frontend.env }}
          env:
            {{- range $key, $value := .Values.frontend.env }}
            - name: {{ $key }}
              value: {{ $value | quote }}
            {{- end }}
          {{- end }}
          livenessProbe:
            httpGet:
              path: /
              port: http
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /
              port: http
            initialDelaySeconds: 10
            periodSeconds: 5
          resources:
            {{- toYaml .Values.frontend.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}

View File

@ -0,0 +1,42 @@
{{/*
Ingress for the frontend. Rendered only when frontend.ingress.enabled;
hosts/paths/tls come straight from values. `$` is used inside the range
loops to reach the root context.
*/}}
{{- if .Values.frontend.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ include "tasko.fullname" . }}-frontend
  labels:
    {{- include "tasko.labels" . | nindent 4 }}
    app.kubernetes.io/component: frontend
  {{- with .Values.frontend.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.frontend.ingress.className }}
  ingressClassName: {{ .Values.frontend.ingress.className }}
  {{- end }}
  {{- if .Values.frontend.ingress.tls }}
  tls:
    {{- range .Values.frontend.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.frontend.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            pathType: {{ .pathType }}
            backend:
              service:
                name: {{ include "tasko.fullname" $ }}-frontend
                port:
                  number: {{ $.Values.frontend.service.port }}
          {{- end }}
    {{- end }}
{{- end }}

View File

@ -0,0 +1,17 @@
# ClusterIP Service for the frontend. The named targetPort "http" maps to the
# container port declared in the frontend Deployment.
apiVersion: v1
kind: Service
metadata:
  name: {{ include "tasko.fullname" . }}-frontend
  labels:
    {{- include "tasko.labels" . | nindent 4 }}
    app.kubernetes.io/component: frontend
spec:
  type: {{ .Values.frontend.service.type }}
  ports:
    - port: {{ .Values.frontend.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "tasko.selectorLabels" . | nindent 4 }}
    app.kubernetes.io/component: frontend

View File

@ -0,0 +1,13 @@
# Secret holding the database connection string consumed by the backend
# Deployment via secretKeyRef.
apiVersion: v1
kind: Secret
metadata:
  # Must match the secretKeyRef name the backend Deployment uses
  # (fullname-suffixed). The previous hard-coded "tasko-secrets" broke any
  # release whose fullname is not "tasko": the pod referenced a Secret that
  # did not exist.
  name: {{ include "tasko.fullname" . }}-secrets
  labels:
    {{- include "tasko.labels" . | nindent 4 }}
type: Opaque
stringData:
  {{- if .Values.postgres }}
  # Bundled postgres StatefulSet (headless service <fullname>-db-headless).
  database-url: "postgresql://{{ .Values.postgres.user }}:{{ .Values.postgres.password }}@{{ include "tasko.fullname" . }}-db-headless:{{ .Values.postgres.port }}/{{ .Values.postgres.database }}"
  {{- else if .Values.postgresql }}
  # postgresql subchart layout (auth.* values, <fullname>-postgresql service).
  database-url: "postgresql://{{ .Values.postgresql.auth.username }}:{{ .Values.postgresql.auth.password }}@{{ include "tasko.fullname" . }}-postgresql:5432/{{ .Values.postgresql.auth.database }}"
  {{- end }}

View File

@ -0,0 +1,12 @@
{{/*
Create a ServiceAccount only when explicitly requested. The `default dict`
guard keeps rendering from failing with a nil-pointer error when values.yaml
omits the serviceAccount block entirely; the annotation access below is safe
because it is only reached when the block exists and create is truthy.
*/}}
{{- if (.Values.serviceAccount | default dict).create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "tasko.serviceAccountName" . }}
  labels:
    {{- include "tasko.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}

View File

@ -0,0 +1,136 @@
global:
  namespace: my-apps

imagePullSecrets: []

# Backend configuration
backend:
  name: backend
  replicaCount: 1
  image:
    repository: harbor.dvirlabs.com/my-apps/tasko-backend
    pullPolicy: IfNotPresent
    tag: "latest"
  service:
    type: ClusterIP
    port: 8000
    targetPort: 8000
  resources:
    requests:
      cpu: 100m
      memory: 128Mi
    limits:
      cpu: 500m
      memory: 512Mi
  env:
    PYTHONUNBUFFERED: "1"
  ingress:
    enabled: true
    className: "traefik"
    annotations:
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
      traefik.ingress.kubernetes.io/router.tls: "true"
      cert-manager.io/cluster-issuer: "letsencrypt-prod"
    hosts:
      - host: api-tasko.dvirlabs.com
        paths:
          - path: /
            pathType: Prefix
    tls:
      - secretName: api-tasko-tls
        hosts:
          - api-tasko.dvirlabs.com

# Frontend configuration
frontend:
  name: frontend
  replicaCount: 1
  image:
    repository: harbor.dvirlabs.com/my-apps/tasko-frontend
    pullPolicy: IfNotPresent
    tag: "latest"
  service:
    type: ClusterIP
    port: 80
    targetPort: 80
  env:
    VITE_API_URL: "https://api-tasko.dvirlabs.com"
  resources:
    requests:
      cpu: 50m
      memory: 64Mi
    limits:
      cpu: 200m
      memory: 256Mi
  ingress:
    enabled: true
    className: "traefik"
    annotations:
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
      traefik.ingress.kubernetes.io/router.tls: "true"
      cert-manager.io/cluster-issuer: "letsencrypt-prod"
    hosts:
      - host: tasko.dvirlabs.com
        paths:
          - path: /
            pathType: Prefix
    tls:
      - secretName: tasko-tls
        hosts:
          - tasko.dvirlabs.com

# PostgreSQL configuration
postgres:
  name: db
  image:
    repository: postgres
    tag: "16-alpine"
    pullPolicy: IfNotPresent
  # NOTE(review): plaintext DB credentials committed to VCS — the chart README
  # itself recommends an external secret mechanism; move these there.
  user: tasko_user
  password: tasko_password
  database: tasko_db
  port: 5432
  service:
    type: ClusterIP
    port: 5432
    targetPort: 5432
  persistence:
    enabled: true
    accessMode: ReadWriteOnce
    storageClass: "nfs-client"
    size: 10Gi
  resources:
    requests:
      cpu: 100m
      memory: 256Mi
    limits:
      cpu: 1000m
      memory: 1Gi

# Ingress configuration
ingress:
  enabled: false # Individual frontend/backend ingress resources handle routing instead
  className: "traefik"
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
  hosts:
    - host: tasko.dvirlabs.com
      paths:
        - path: /
          pathType: Prefix
          backend: frontend
  tls:
    - secretName: tasko-tls
      hosts:
        - tasko.dvirlabs.com

# --- Keys referenced by the chart templates but previously missing ---
# serviceaccount.yaml dereferences .Values.serviceAccount.create, which fails
# with a nil-pointer template error when the block is absent; the rest are
# declared so overrides are discoverable (mirrors the invy chart values).
serviceAccount:
  create: true
  annotations: {}
  name: ""

# Chart-wide fallback replica count (per-component replicaCount wins)
replicaCount: 1

podAnnotations: {}
podSecurityContext: {}
securityContext: {}
nodeSelector: {}
tolerations: []
affinity: {}

View File

@ -48,6 +48,21 @@ spec:
secretKeyRef:
name: {{ include "tasko.fullname" . }}-secrets
key: database-url
- name: GOOGLE_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ include "tasko.fullname" . }}-secrets
key: google-client-id
- name: GOOGLE_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ include "tasko.fullname" . }}-secrets
key: google-client-secret
- name: SESSION_SECRET
valueFrom:
secretKeyRef:
name: {{ include "tasko.fullname" . }}-secrets
key: session-secret
{{- range $key, $value := .Values.backend.env }}
- name: {{ $key }}
value: {{ $value | quote }}

View File

@ -1,7 +1,7 @@
apiVersion: v1
kind: Secret
metadata:
name: tasko-secrets
name: {{ include "tasko.fullname" . }}-secrets
labels:
{{- include "tasko.labels" . | nindent 4 }}
type: Opaque
@ -11,3 +11,8 @@ stringData:
{{- else if .Values.postgresql }}
database-url: "postgresql://{{ .Values.postgresql.auth.username }}:{{ .Values.postgresql.auth.password }}@{{ include "tasko.fullname" . }}-postgresql:5432/{{ .Values.postgresql.auth.database }}"
{{- end }}
# OAuth Secrets
google-client-id: {{ .Values.backend.oauth.google.clientId | quote }}
google-client-secret: {{ .Values.backend.oauth.google.clientSecret | quote }}
# Session Secret for signing cookies
session-secret: {{ .Values.backend.sessionSecret | quote }}

View File

@ -26,6 +26,18 @@ backend:
env:
PYTHONUNBUFFERED: "1"
ENVIRONMENT: "production"
GOOGLE_REDIRECT_URI: "https://api-tasko.dvirlabs.com/auth/google/callback"
FRONTEND_URL: "https://tasko.dvirlabs.com"
# OAuth Configuration (stored as secrets)
oauth:
google:
clientId: "672182384838-vob26vd0qhmf0g9mru4u4sibkqre0rfa.apps.googleusercontent.com"
clientSecret: "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"
# Session secret for signing cookies (generate with: python -c "import secrets; print(secrets.token_hex(32))")
sessionSecret: "7f8a9b6c5d4e3f2a1b0c9d8e7f6a5b4c3d2e1f0a9b8c7d6e5f4a3b2c1d0e9f8a"
ingress:
enabled: true
@ -107,16 +119,15 @@ postgres:
persistence:
enabled: true
accessMode: ReadWriteOnce
storageClass: "nfs-client"
size: 10Gi
size: 8Gi
resources:
requests:
cpu: 100m
memory: 256Mi
memory: 128Mi
limits:
cpu: 1000m
memory: 1Gi
cpu: 500m
memory: 512Mi
# Ingress configuration
ingress:
@ -134,3 +145,9 @@ ingress:
- secretName: tasko-tls
hosts:
- tasko.dvirlabs.com
# Service Account configuration
serviceAccount:
create: true
annotations: {}
name: ""

View File

@ -0,0 +1,2 @@
# Exposure toggle and public hostname for the calink app.
# NOTE(review): the consumer of this file is not visible in this chunk — confirm.
enabled: true
hostname: calink.dvirlabs.com

View File

@ -0,0 +1,84 @@
nameOverride: ""
fullnameOverride: ""
commonLabels: {}
commonAnnotations: {}

# Backend configuration
backend:
  image:
    repository: harbor.dvirlabs.com/my-apps/calink-backend
    tag: master-e6bd63c
    pullPolicy: IfNotPresent
  replicas: 1
  resources:
    limits:
      cpu: 500m
      memory: 512Mi
    requests:
      cpu: 250m
      memory: 256Mi
  # k8s-style name/value env list (sibling charts use the map form instead)
  env:
    - name: DATABASE_PATH
      value: "/data/app.db"
  # NOTE(review): DATABASE_PATH points at a file under the PVC — presumably a
  # file-based DB (e.g. SQLite); confirm against the backend image.
  persistence:
    enabled: true
    storageClass: "nfs-client"
    size: 1Gi
    mountPath: /data
  service:
    type: ClusterIP
    port: 8000
  healthCheck:
    path: /health
    initialDelaySeconds: 10
    periodSeconds: 30
  ingress:
    enabled: true
    className: "traefik"
    annotations:
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
      traefik.ingress.kubernetes.io/router.tls: "true"
      cert-manager.io/cluster-issuer: "letsencrypt-prod"
    hosts:
      - host: api-calink.dvirlabs.com
        paths:
          - path: /
            pathType: Prefix
    tls:
      - secretName: api-calink-tls
        hosts:
          - api-calink.dvirlabs.com

# Frontend configuration
frontend:
  image:
    repository: harbor.dvirlabs.com/my-apps/calink-frontend
    tag: master-e6bd63c
    pullPolicy: IfNotPresent
  replicas: 1
  resources:
    limits:
      cpu: 200m
      memory: 256Mi
    requests:
      cpu: 100m
      memory: 128Mi
  service:
    type: ClusterIP
    port: 80
  healthCheck:
    path: /
    initialDelaySeconds: 5
    periodSeconds: 30
  ingress:
    enabled: true
    className: "traefik"
    annotations:
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
      traefik.ingress.kubernetes.io/router.tls: "true"
      cert-manager.io/cluster-issuer: "letsencrypt-prod"
    hosts:
      - host: calink.dvirlabs.com
        paths:
          - path: /
            pathType: Prefix
    tls:
      - secretName: calink-tls
        hosts:
          - calink.dvirlabs.com

View File

@ -0,0 +1,2 @@
# Exposure toggle and public hostname for the dateme app.
# NOTE(review): the consumer of this file is not visible in this chunk — confirm.
enabled: true
hostname: dateme.dvirlabs.com

View File

@ -0,0 +1,120 @@
global:
  namespace: my-apps

imagePullSecrets: []

# Backend configuration
backend:
  name: backend
  replicaCount: 1
  image:
    repository: harbor.dvirlabs.com/my-apps/dateme-backend
    pullPolicy: Always
    tag: master-a1e6c12
  service:
    type: ClusterIP
    port: 8000
    targetPort: 8000
  resources:
    requests:
      cpu: 100m
      memory: 128Mi
    limits:
      cpu: 500m
      memory: 512Mi
  env:
    PYTHONUNBUFFERED: "1"
  ingress:
    enabled: true
    className: "traefik"
    annotations:
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
      traefik.ingress.kubernetes.io/router.tls: "true"
      cert-manager.io/cluster-issuer: "letsencrypt-prod"
    hosts:
      - host: api-dateme.dvirlabs.com
        paths:
          - path: /
            pathType: Prefix
    tls:
      - secretName: api-dateme-tls
        hosts:
          - api-dateme.dvirlabs.com

# Frontend configuration
frontend:
  name: frontend
  replicaCount: 1
  image:
    repository: harbor.dvirlabs.com/my-apps/dateme-frontend
    pullPolicy: Always
    tag: master-7c2a387
  service:
    type: ClusterIP
    port: 80
    targetPort: 80
  env:
    API_BASE: "https://api-dateme.dvirlabs.com"
  resources:
    requests:
      cpu: 50m
      memory: 64Mi
    limits:
      cpu: 200m
      memory: 256Mi
  ingress:
    enabled: true
    className: "traefik"
    annotations:
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
      traefik.ingress.kubernetes.io/router.tls: "true"
      cert-manager.io/cluster-issuer: "letsencrypt-prod"
    hosts:
      - host: dateme.dvirlabs.com
        paths:
          - path: /
            pathType: Prefix
    tls:
      - secretName: dateme-tls
        hosts:
          - dateme.dvirlabs.com
  externalUrl: "https://dateme.dvirlabs.com"

# PostgreSQL configuration
postgres:
  name: db
  image:
    repository: postgres
    tag: "16"
    pullPolicy: IfNotPresent
  # NOTE(review): plaintext DB credentials committed to VCS — move to a
  # Secret / external secret store and rotate.
  user: dateme_user
  password: dateme_password
  database: dateme_db
  port: 5432
  service:
    type: ClusterIP
    port: 5432
    targetPort: 5432
  persistence:
    enabled: true
    accessMode: ReadWriteOnce
    storageClass: "nfs-client"
    size: 10Gi
  resources:
    requests:
      cpu: 100m
      memory: 256Mi
    limits:
      cpu: 500m
      memory: 512Mi

# Ingress (top-level, disabled - use component-specific ingress instead)
ingress:
  enabled: false
  className: "traefik"
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
  hosts:
    - host: dateme.dvirlabs.com
      paths:
        - path: /
          pathType: Prefix
  tls:
    - secretName: dateme-tls
      hosts:
        - dateme.dvirlabs.com

View File

@ -0,0 +1,2 @@
# Exposure toggle and public hostname for the invy app.
# NOTE(review): the consumer of this file is not visible in this chunk — confirm.
enabled: true
hostname: invy.dvirlabs.com

159
manifests/invy/values.yaml Normal file
View File

@ -0,0 +1,159 @@
global:
  namespace: my-apps

imagePullSecrets: []

# Backend configuration
backend:
  name: backend
  replicaCount: 1
  image:
    repository: harbor.dvirlabs.com/my-apps/invy-backend
    pullPolicy: IfNotPresent
    tag: "master-71b6828"
  service:
    type: ClusterIP
    port: 8000
    targetPort: 8000
  resources:
    requests:
      cpu: 100m
      memory: 128Mi
    limits:
      cpu: 500m
      memory: 512Mi
  env:
    PYTHONUNBUFFERED: "1"
    GOOGLE_REDIRECT_URI: "https://api-invy.dvirlabs.com/auth/google/callback"
    FRONTEND_URL: "https://invy.dvirlabs.com"
  # Google OAuth credentials
  # WARNING(review): live OAuth and WhatsApp credentials are committed here in
  # plain text. Rotate them and source them from a Kubernetes Secret or an
  # external secret manager instead of values.yaml.
  googleClientId: "97702229450-ivi5rvj0drai08k5svm7sekqdijj6953.apps.googleusercontent.com"
  googleClientSecret: "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"
  # WhatsApp Cloud API credentials
  whatsappAccessToken: "EAAMdmYX7DJUBQ5Hc07NUTSgvu1ZCF51FfQRrrUNuuZBpaRLpoT2BzXa6rFcsZC8BJZCHS7j7rJFpFAwlKlwm0LsRgDpQRMU0MDcqaOIPzJ56Xp2ueBArGaPn2gGyj9dp1X7VPt0N8bZCBcUPOsTpKTflWjj09Y1NahbtZAQ5msXo2JV1wVTnaQrzKN7wYwkpLxNAZDZD"
  whatsappPhoneNumberId: "1028674740318926"
  ingress:
    enabled: true
    className: "traefik"
    annotations:
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
      traefik.ingress.kubernetes.io/router.tls: "true"
      cert-manager.io/cluster-issuer: "letsencrypt-prod"
    hosts:
      - host: api-invy.dvirlabs.com
        paths:
          - path: /
            pathType: Prefix
    tls:
      - secretName: api-invy-tls
        hosts:
          - api-invy.dvirlabs.com

# Frontend configuration
frontend:
  name: frontend
  replicaCount: 1
  image:
    repository: harbor.dvirlabs.com/my-apps/invy-frontend
    pullPolicy: IfNotPresent
    tag: "master-71b6828"
  service:
    type: ClusterIP
    port: 80
    targetPort: 80
  env:
    VITE_API_URL: "https://api-invy.dvirlabs.com"
  # Admin login credentials
  # WARNING(review): plaintext admin credentials committed to VCS — move to a
  # Secret and rotate.
  adminUsername: "admin"
  adminPassword: "wedding2025"
  resources:
    requests:
      cpu: 50m
      memory: 64Mi
    limits:
      cpu: 200m
      memory: 256Mi
  ingress:
    enabled: true
    className: "traefik"
    annotations:
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
      traefik.ingress.kubernetes.io/router.tls: "true"
      cert-manager.io/cluster-issuer: "letsencrypt-prod"
    hosts:
      - host: invy.dvirlabs.com
        paths:
          - path: /
            pathType: Prefix
    tls:
      - secretName: invy-tls
        hosts:
          - invy.dvirlabs.com

# PostgreSQL configuration
postgres:
  name: db
  image:
    repository: postgres
    tag: "16-alpine"
    pullPolicy: IfNotPresent
  user: invy_user
  password: invy_password
  database: invy_db
  port: 5432
  service:
    type: ClusterIP
    port: 5432
    targetPort: 5432
  persistence:
    enabled: true
    accessMode: ReadWriteOnce
    storageClass: "nfs-client"
    size: 10Gi
  resources:
    requests:
      cpu: 100m
      memory: 256Mi
    limits:
      cpu: 1000m
      memory: 1Gi

# Ingress configuration
ingress:
  enabled: false # Individual frontend/backend ingress resources handle routing instead
  className: "traefik"
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
  hosts:
    - host: invy.dvirlabs.com
      paths:
        - path: /
          pathType: Prefix
          backend: frontend
  tls:
    - secretName: invy-tls
      hosts:
        - invy.dvirlabs.com

# Service Account
serviceAccount:
  # Reuse the namespace's existing "default" ServiceAccount (which already
  # carries the harbor-creds imagePullSecret). With create: true and
  # name: "default" the chart would try to create a ServiceAccount literally
  # named "default", colliding with the built-in one and failing the Helm
  # release on ownership metadata.
  create: false
  annotations: {}
  name: "default"

# Pod annotations
podAnnotations: {}

# Pod security context
podSecurityContext: {}
  # fsGroup: 2000

# Container security context
securityContext: {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

# Node selector
nodeSelector: {}

# Tolerations
tolerations: []

# Affinity
affinity: {}

# Replica count (default for both frontend and backend if not specified)
replicaCount: 1

View File

@ -0,0 +1,2 @@
# Exposure toggle and public hostname for the ipify app.
# NOTE(review): the consumer of this file is not visible in this chunk — confirm.
enabled: true
hostname: ipify.dvirlabs.com

View File

@ -0,0 +1,87 @@
# Basic configuration values for IP Calculator application

# Application metadata
app:
  name: ipify
  version: "1.0.0"
  description: "IP Subnet Calculator with React + Vite frontend and FastAPI backend"

# Backend configuration
backend:
  name: ipify-backend
  replicaCount: 1
  image:
    repository: harbor.dvirlabs.com/my-apps/ipify-backend
    pullPolicy: Always
    tag: master-894b429
  service:
    type: ClusterIP
    port: 8000
    targetPort: 8000
  resources:
    limits:
      cpu: "500m"
      memory: "512Mi"
    requests:
      cpu: "250m"
      memory: "256Mi"
  ingress:
    enabled: true
    className: "traefik"
    annotations:
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
      traefik.ingress.kubernetes.io/router.tls: "true"
      cert-manager.io/cluster-issuer: "letsencrypt-prod"
    hosts:
      - host: api-ipify.dvirlabs.com
        paths:
          - path: /
            pathType: Prefix
    tls:
      - secretName: api-ipify-tls
        hosts:
          - api-ipify.dvirlabs.com

# Frontend configuration
frontend:
  name: ipify-frontend
  replicaCount: 1
  image:
    repository: harbor.dvirlabs.com/my-apps/ipify-frontend
    pullPolicy: Always
    tag: master-67d217c
  service:
    type: ClusterIP
    port: 80
    targetPort: 80
  resources:
    limits:
      cpu: "200m"
      memory: "256Mi"
    requests:
      cpu: "100m"
      memory: "128Mi"
  ingress:
    enabled: true
    className: "traefik"
    annotations:
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
      traefik.ingress.kubernetes.io/router.tls: "true"
      cert-manager.io/cluster-issuer: "letsencrypt-prod"
    hosts:
      - host: ipify.dvirlabs.com
        paths:
          - path: /
            pathType: Prefix
    tls:
      - secretName: ipify-tls
        hosts:
          - ipify.dvirlabs.com

# Environment variables
env:
  backendUrl: "https://api-ipify.dvirlabs.com"

# Ingress configuration (kept for compatibility)
ingress:
  enabled: false

View File

@ -8,7 +8,7 @@ backend:
image:
repository: harbor.dvirlabs.com/my-apps/my-recipes-backend
pullPolicy: Always
tag: develop-0f3aa43
tag: develop-82855cf
service:
type: ClusterIP
port: 8000
@ -22,6 +22,7 @@ backend:
memory: 512Mi
env:
PYTHONUNBUFFERED: "1"
ENVIRONMENT: "production"
ingress:
enabled: true
className: "traefik"
@ -45,7 +46,7 @@ frontend:
image:
repository: harbor.dvirlabs.com/my-apps/my-recipes-frontend
pullPolicy: Always
tag: develop-6d5b8f2
tag: develop-d36f4bc
service:
type: ClusterIP
port: 80
@ -75,6 +76,7 @@ frontend:
- secretName: my-recipes-tls
hosts:
- my-recipes.dvirlabs.com
externalUrl: "https://my-recipes.dvirlabs.com"
# Admin user configuration
admin:
username: "admin"
@ -106,10 +108,34 @@ postgres:
resources:
requests:
cpu: 100m
memory: 256Mi
memory: 1Gi
limits:
cpu: 1000m
memory: 1Gi
# OAuth Configuration
oauth:
google:
clientId: "143092846986-hsi59m0on2c9rb5qrdoejfceieao2ioc.apps.googleusercontent.com"
clientSecret: "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"
redirectUri: "https://api-my-recipes.dvirlabs.com/auth/google/callback"
azure:
clientId: "db244cf5-eb11-4738-a2ea-5b0716c9ec0a"
clientSecret: "Zad8Q~qRBxaQq8up0lLXAq4pHzrVM2JFGFJhHaDp"
tenantId: "consumers"
redirectUri: "https://api-my-recipes.dvirlabs.com/auth/azure/callback"
# Email Configuration
email:
smtpHost: "smtp.gmail.com"
smtpPort: "587"
smtpUser: "dvirlabs@gmail.com"
smtpPassword: "agaanrhbbazbdytv"
smtpFrom: "dvirlabs@gmail.com"
# R2 Backup Configuration
r2:
endpoint: "https://d4704b8c40b2f95b2c7bf7ee4ecc52f8.r2.cloudflarestorage.com"
accessKey: "1997b1e48a337c0dbe1f7552a08631b5" # Replace with actual R2 access key
secretKey: "369694e39fedfedb254158c147171f5760de84fa2346d5d5d5a961f1f517dbc6" # Replace with actual R2 secret key
backupInterval: "weekly"
# Ingress (top-level, disabled - use component-specific ingress instead)
ingress:
enabled: false

View File

@ -25,6 +25,7 @@ backend:
image:
repository: harbor.dvirlabs.com/my-apps/navix-backend
pullPolicy: IfNotPresent
tag: master-0337f70
tag: master-62a2769
service:
type: ClusterIP
@ -34,6 +35,11 @@ backend:
MINIO_SECRET_KEY: "your-secret-key"
MINIO_ENDPOINT: "s3.dvirlabs.com"
MINIO_BUCKET: "navix-icons"
persistence:
enabled: true
accessMode: ReadWriteOnce
size: 1Gi
storageClass: "nfs-client"
ingress:
enabled: true
className: traefik

View File

@ -1,13 +1,15 @@
global:
namespace: my-apps
imagePullSecrets: []
# Backend configuration
backend:
name: backend
replicaCount: 1
image:
repository: harbor.dvirlabs.com/my-apps/tasko-backend
pullPolicy: Always
tag: master-8094192
pullPolicy: IfNotPresent
tag: "master-b2d800a"
service:
type: ClusterIP
port: 8000
@ -21,6 +23,16 @@ backend:
memory: 512Mi
env:
PYTHONUNBUFFERED: "1"
ENVIRONMENT: "production"
GOOGLE_REDIRECT_URI: "https://api-tasko.dvirlabs.com/auth/google/callback"
FRONTEND_URL: "https://tasko.dvirlabs.com"
# OAuth Configuration (stored as secrets)
oauth:
google:
clientId: "672182384838-vob26vd0qhmf0g9mru4u4sibkqre0rfa.apps.googleusercontent.com"
clientSecret: "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"
# Session secret for signing cookies (generate with: python -c "import secrets; print(secrets.token_hex(32))")
sessionSecret: "7f8a9b6c5d4e3f2a1b0c9d8e7f6a5b4c3d2e1f0a9b8c7d6e5f4a3b2c1d0e9f8a"
ingress:
enabled: true
className: "traefik"
@ -43,8 +55,8 @@ frontend:
replicaCount: 1
image:
repository: harbor.dvirlabs.com/my-apps/tasko-frontend
pullPolicy: Always
tag: master-3439cb7
pullPolicy: IfNotPresent
tag: "master-4e0ae2e"
service:
type: ClusterIP
port: 80
@ -93,7 +105,6 @@ postgres:
enabled: true
accessMode: ReadWriteOnce
size: 8Gi
storageClass: ""
resources:
requests:
cpu: 100m
@ -101,6 +112,24 @@ postgres:
limits:
cpu: 500m
memory: 512Mi
# Use default ServiceAccount (has harbor-creds imagePullSecret)
# Ingress configuration
ingress:
enabled: false # Individual frontend/backend ingress resources handle routing instead
className: "traefik"
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
hosts:
- host: tasko.dvirlabs.com
paths:
- path: /
pathType: Prefix
backend: frontend
tls:
- secretName: tasko-tls
hosts:
- tasko.dvirlabs.com
# Service Account configuration
serviceAccount:
create: false
create: true
annotations: {}
name: ""