add cyberchef, mariadb, postgres, slskd, update ntfy

root 2025-04-24 14:23:48 -04:00
parent caf7959ba9
commit 64df2f8f50
7 changed files with 494 additions and 80 deletions

.gitignore (vendored, new file, +2)

@@ -0,0 +1,2 @@
temp/
secrets/

cyberchef.yaml (new file, +38)

@@ -0,0 +1,38 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: cyberchef
  name: cyberchef
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cyberchef
  template:
    metadata:
      labels:
        app: cyberchef
    spec:
      containers:
        - image: mpepping/cyberchef:latest
          name: cyberchef
          ports:
            - containerPort: 8000
              protocol: TCP
      restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: cyberchef
  name: cyberchef-svc
spec:
  ports:
    - name: "8000"
      port: 8000
      targetPort: 8000
  selector:
    app: cyberchef
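
cyberchef.yaml only defines a Deployment and a ClusterIP Service, so the UI is not reachable from outside the cluster yet. If it is meant to be exposed the same way ntfy is, a Traefik Ingress along these lines would do it; this is a sketch only, and the cyberchef.lan hostname is an assumption, not part of this commit:

---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: cyberchef-http
  annotations:
    traefik.ingress.kubernetes.io/router.entrypoints: web
spec:
  rules:
    - host: cyberchef.lan   # hypothetical hostname; adjust to the cluster's naming scheme
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: cyberchef-svc
                port:
                  number: 8000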

mariadb.yaml (new file, +128)

@@ -0,0 +1,128 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: mariadb
  name: mariadb
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mariadb
  strategy:
    type: Recreate
  template:
    metadata:
      annotations:
        traefik.enable: "false"
      labels:
        app: mariadb
    spec:
      containers:
        - env:
            - name: MYSQL_PASSWORD
              valueFrom:
                configMapKeyRef:
                  key: MYSQL_PASSWORD
                  name: mariadb-secrets-env
            - name: MYSQL_ROOT_PASSWORD
              valueFrom:
                configMapKeyRef:
                  key: MYSQL_ROOT_PASSWORD
                  name: mariadb-secrets-env
            - name: MYSQL_USER
              valueFrom:
                configMapKeyRef:
                  key: MYSQL_USER
                  name: mariadb-secrets-env
          image: linuxserver/mariadb:latest
          name: mariadb
          ports:
            - containerPort: 3306
              protocol: TCP
          volumeMounts:
            - mountPath: /config
              name: mariadb-data-volume
            - mountPath: /config/conf
              name: mariadb-config-volume
      volumes:
        - name: mariadb-config-volume
          persistentVolumeClaim:
            claimName: mariadb-config-pvc
        - name: mariadb-data-volume
          persistentVolumeClaim:
            claimName: mariadb-data-pvc
      restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
  annotations:
    traefik.enable: "false"
  name: mariadb-svc
spec:
  ports:
    - name: "3306"
      port: 3306
      targetPort: 3306
      nodePort: 31306
  type: NodePort
  selector:
    app: mariadb
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mariadb-data-pv
spec:
  capacity:
    storage: 100Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: ""
  nfs:
    path: /mnt/raid/00_meta/02_services/mariadb
    server: 192.168.1.146
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mariadb-data-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Gi
  volumeName: mariadb-data-pv
  storageClassName: ""
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mariadb-config-pv
spec:
  capacity:
    storage: 100Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: ""
  nfs:
    path: /mnt/raid/00_meta/05_service_config/mariadb/conf
    server: 192.168.1.146
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mariadb-config-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Gi
  volumeName: mariadb-config-pv
  storageClassName: ""

Deleted file (-26)

@@ -1,26 +0,0 @@
services:
  ntfy:
    image: binwiederhier/ntfy
    container_name: ntfy
    command:
      - serve
    environment:
      - TZ=America/New_York # optional: set desired timezone
    # user: UID:GID # optional: replace with your own user/group or uid/gid
    # volumes:
    #   - $DATA_PATH/ntfy:/var/cache/ntfy
    #   - $CONF_DIR/ntfy:/etc/ntfy
    ports:
      - 80:80
    healthcheck: # optional: remember to adapt the host:port to your environment
      test:
        [
          "CMD-SHELL",
          "wget -q --tries=1 http://localhost:80/v1/health -O - | grep -Eo '\"healthy\"\\s*:\\s*true' || exit 1",
        ]
      interval: 60s
      timeout: 10s
      retries: 3
      start_period: 40s
    restart: unless-stopped
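
The Compose healthcheck removed here is also dropped from the Deployment in ntfy.yaml below (the old exec-style livenessProbe goes away). If the check is still wanted, the simpler Kubernetes equivalent is an httpGet probe against ntfy's /v1/health endpoint; a sketch, not part of this commit, and it only verifies the HTTP status rather than grepping for "healthy": true as the wget pipeline did:

          livenessProbe:
            httpGet:
              path: /v1/health
              port: 80
            initialDelaySeconds: 40
            periodSeconds: 60
            timeoutSeconds: 10
            failureThreshold: 3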

ntfy.yaml (modified, 143 lines changed)

@@ -9,7 +9,7 @@ spec:
   replicas: 1
   selector:
     matchLabels:
-      io.kompose.service: ntfy
+      app: ntfy
   template:
     metadata:
       labels:
@@ -22,28 +22,27 @@ spec:
             - name: TZ
               value: America/New_York
           image: binwiederhier/ntfy
-          livenessProbe:
-            exec:
-              command:
-                - wget -q --tries=1 http://localhost:80/v1/health -O - | grep -Eo '"healthy"\s*:\s*true' || exit 1
-            failureThreshold: 3
-            initialDelaySeconds: 40
-            periodSeconds: 60
-            timeoutSeconds: 10
+          volumeMounts:
+            - name: ntfy-cache-volume
+              mountPath: /var/cache/ntfy
+            - name: ntfy-config-volume
+              mountPath: /etc/ntfy
           name: ntfy
           ports:
            - containerPort: 80
              protocol: TCP
       volumes:
-        - name: smb-storage
-          PersistentVolumeClaim:
-            claimName: smb-pvc
+        - name: ntfy-cache-volume
+          persistentVolumeClaim:
+            claimName: ntfy-cache-pvc
+        - name: ntfy-config-volume
+          persistentVolumeClaim:
+            claimName: ntfy-config-pvc
       restartPolicy: Always
 ---
 apiVersion: v1
 kind: Service
 metadata:
-  labels:
   name: ntfy-svc
 spec:
   ports:
@@ -55,7 +54,7 @@ spec:
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
-  name: adminer-http
+  name: ntfy-http
   annotations:
     traefik.ingress.kubernetes.io/router.entrypoints: web
 spec:
@@ -67,54 +66,90 @@ spec:
             pathType: Prefix
             backend:
               service:
-                name: adminer-svc
+                name: ntfy-svc
                 port:
                   number: 80
 ---
-# TODO: make this use samba
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: ntfy-config-pv
-spec:
-  capacity:
-    storage: 100Gi
-  volumeMode: Filesystem
-  accessModes:
-    - ReadWriteMultiple
-  persistentVolumeReclaimPolicy: Delete
-  storageClassName: local-storage
-  local:
-    path: /mnt/raid # TODO: make this point to correct dir
-  nodeAffinity:
-    required:
-      nodeSelectorTerms:
-        - matchExpressions:
-            - key: kubernetes.io/hostname
-              operator: In
-              values:
-                - lipotropin
----
-# TODO: make this use samba
 apiVersion: v1
 kind: PersistentVolume
 metadata:
+  annotations:
+    pv.kubernetes.io/provisioned-by: smb.csi.k8s.io
   name: ntfy-cache-pv
 spec:
   capacity:
     storage: 100Gi
-  volumeMode: Filesystem
   accessModes:
-    - ReadWriteMultiple
-  persistentVolumeReclaimPolicy: Delete
-  storageClassName: local-storage
-  local:
-    path: /mnt/raid # TODO: make this point to correct dir
-  nodeAffinity:
-    required:
-      nodeSelectorTerms:
-        - matchExpressions:
-            - key: kubernetes.io/hostname
-              operator: In
-              values:
-                - lipotropin
+    - ReadWriteOnce
+  persistentVolumeReclaimPolicy: Retain
+  storageClassName: smb
+  mountOptions:
+    - dir_mode=0777
+    - file_mode=0777
+    # - vers=3.0
+  csi:
+    driver: smb.csi.k8s.io
+    # volumeHandle format: {smb-server-address}#{sub-dir-name}#{share-name}
+    # make sure this value is unique for every share in the cluster
+    volumeHandle: lipotropin.lan#meta/services/ntfy#raid#
+    volumeAttributes:
+      source: //192.168.1.146/raid
+      subDir: 00_meta/02_services/ntfy
+    nodeStageSecretRef:
+      name: smbcreds
+      namespace: default
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: ntfy-cache-pvc
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 100Gi
+  volumeName: ntfy-cache-pv
+  storageClassName: smb
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  annotations:
+    pv.kubernetes.io/provisioned-by: smb.csi.k8s.io
+  name: ntfy-config-pv
+spec:
+  capacity:
+    storage: 100Gi
+  accessModes:
+    - ReadWriteOnce
+  persistentVolumeReclaimPolicy: Retain
+  storageClassName: smb
+  mountOptions:
+    - dir_mode=0777
+    - file_mode=0777
+    # - vers=3.0
+  csi:
+    driver: smb.csi.k8s.io
+    # volumeHandle format: {smb-server-address}#{sub-dir-name}#{share-name}
+    # make sure this value is unique for every share in the cluster
+    volumeHandle: lipotropin.lan#meta/services_config/ntfy#raid#
+    volumeAttributes:
+      source: //192.168.1.146/raid
+      subDir: 00_meta/05_service_config/ntfy
+    nodeStageSecretRef:
+      name: smbcreds
+      namespace: default
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: ntfy-config-pvc
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 100Gi
+  volumeName: ntfy-config-pv
+  storageClassName: smb
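
Both SMB-backed PersistentVolumes reference nodeStageSecretRef smbcreds in the default namespace, which this commit does not create. With csi-driver-smb that Secret holds the share credentials under username/password keys; a sketch with placeholder values (presumably the real one is kept under secrets/):

---
apiVersion: v1
kind: Secret
metadata:
  name: smbcreds
  namespace: default
type: Opaque
stringData:
  username: smbuser     # placeholder; use an account that can mount //192.168.1.146/raid
  password: changeme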

postgres.yaml (new file, +86)

@@ -0,0 +1,86 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: postgres
  name: postgres
spec:
  replicas: 1
  selector:
    matchLabels:
      app: postgres
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: postgres
    spec:
      containers:
        - env:
            - name: POSTGRES_PASSWORD
              valueFrom:
                configMapKeyRef:
                  key: POSTGRES_PASSWORD
                  name: postgres-secrets-env
            - name: POSTGRES_USER
              valueFrom:
                configMapKeyRef:
                  key: POSTGRES_USER
                  name: postgres-secrets-env
          image: postgres:15-alpine
          name: postgres
          ports:
            - containerPort: 5432
              protocol: TCP
          volumeMounts:
            - mountPath: /var/lib/postgresql/data
              name: postgres-data-volume
      restartPolicy: Always
      volumes:
        - name: postgres-data-volume
          persistentVolumeClaim:
            claimName: postgres-data-pvc
---
apiVersion: v1
kind: Service
metadata:
  name: postgres-svc
spec:
  ports:
    - name: "5432"
      port: 5432
      targetPort: 5432
      nodePort: 31432
  type: NodePort
  selector:
    app: postgres
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: postgres-data-pv
spec:
  capacity:
    storage: 100Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: ""
  nfs:
    path: /mnt/raid/00_meta/02_services/postgresql/
    server: 192.168.1.146
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: postgres-data-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Gi
  volumeName: postgres-data-pv
  storageClassName: ""

slskd.yaml (new file, +151)

@@ -0,0 +1,151 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: slskd
  name: slskd
spec:
  replicas: 1
  selector:
    matchLabels:
      app: slskd
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: slskd
    spec:
      containers:
        - image: slskd/slskd
          name: slskd
          ports:
            - containerPort: 5030
              protocol: TCP
            - containerPort: 5031
              protocol: TCP
            - containerPort: 50300
              protocol: TCP
          volumeMounts:
            - mountPath: /app
              name: slskd-config-volume
            - mountPath: /app/downloads
              name: slskd-downloads-volume
            - mountPath: /music
              name: slskd-music-volume
      restartPolicy: Always
      volumes:
        - name: slskd-config-volume
          persistentVolumeClaim:
            claimName: slskd-config-pvc
        - name: slskd-downloads-volume
          persistentVolumeClaim:
            claimName: slskd-downloads-pvc
        - name: slskd-music-volume
          persistentVolumeClaim:
            claimName: slskd-music-pvc
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: slskd
  name: slskd-svc
spec:
  ports:
    - name: "5030"
      port: 5030
      targetPort: 5030
    - name: "5031"
      port: 5031
      targetPort: 5031
    - name: "50300"
      port: 50300
      targetPort: 50300
  selector:
    app: slskd
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: slskd-config-pv
spec:
  capacity:
    storage: 100Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: ""
  nfs:
    path: /mnt/raid/00_meta/05_service_config/slskd
    server: 192.168.1.146
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: slskd-config-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Gi
  volumeName: slskd-config-pv
  storageClassName: ""
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: slskd-downloads-pv
spec:
  capacity:
    storage: 100Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: ""
  nfs:
    path: /mnt/raid/30_media/33_music/
    server: 192.168.1.146
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: slskd-downloads-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Gi
  volumeName: slskd-downloads-pv
  storageClassName: ""
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: slskd-music-pv
spec:
  capacity:
    storage: 100Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: ""
  nfs:
    path: /mnt/raid/30_media/33_music/
    server: 192.168.1.146
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: slskd-music-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Gi
  volumeName: slskd-music-pv
  storageClassName: ""