ADD: Kubernetes Dashboard and PhotoPrism manifests

This commit is contained in:
henry
2025-04-28 21:13:58 +02:00
parent 29ac3974b7
commit 9a2e50b438
16 changed files with 1225 additions and 0 deletions

View File

@@ -0,0 +1,18 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
# NOTE(review): this binds the dashboard login account to cluster-admin —
# full, unrestricted access to the whole cluster. Acceptable for a single-user
# homelab; scope it down with a dedicated (Cluster)Role for anything shared.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: admin-user
    namespace: kubernetes-dashboard

1
k3s/dashboard/getToken.sh Executable file
View File

@@ -0,0 +1 @@
#!/bin/sh
# Print a short-lived bearer token for the admin-user ServiceAccount,
# used to log in to the Kubernetes Dashboard.
# (Fixed: the file is marked executable but had no shebang.)
kubectl -n kubernetes-dashboard create token admin-user

303
k3s/dashboard/manifest.yaml Normal file
View File

@@ -0,0 +1,303 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Namespace, ServiceAccount, Service, Secrets and ConfigMap for the
# Kubernetes Dashboard. (Fixed: the YAML had lost all indentation, which
# turned nested fields like metadata.name into invalid top-level keys.)
apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
# NodePort exposes the dashboard on every node's IP; use an Ingress or
# `kubectl port-forward` if you don't want LAN-wide exposure.
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard
---
# Minimal RBAC for the dashboard's own ServiceAccount: it may only touch its
# own secrets/settings and proxy to the metrics services — NOT cluster-admin.
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
  # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          # NOTE(review): mutable ':latest' with pullPolicy Always means every
          # pod restart may pull a different dashboard version. Consider
          # pinning a tag matching the bundled metrics-scraper (v1.0.4 shipped
          # with dashboard v2.0.x) — confirm the intended version first.
          image: kubernetesui/dashboard:latest
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
            # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        # Deprecated alpha annotation (ignored since k8s 1.19, removed in
        # 1.25); kept for very old clusters. The pod-level securityContext
        # below is the supported replacement on current k3s.
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.4
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
            - mountPath: /tmp
              name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}

452
k3s/dashboard/values.yaml Normal file
View File

@@ -0,0 +1,452 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# General configuration shared across resources
# NOTE(review): nesting restored from the flattened copy following the upstream
# kubernetes-dashboard Helm chart v7 values layout — verify with `helm lint`.
app:
  # Mode determines if chart should deploy a full Dashboard with all containers or just the API.
  # - dashboard - deploys all the containers
  # - api - deploys just the API
  mode: 'dashboard'
  image:
    pullPolicy: IfNotPresent
    pullSecrets: []
  scheduling:
    # Node labels for pod assignment
    # Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
    nodeSelector: {}
  security:
    # Allow overriding csrfKey used by API/Auth containers.
    # It has to be base64 encoded random 256 bytes string.
    # If empty, it will be autogenerated.
    csrfKey: ~
    # SecurityContext to be added to pods
    # To disable set the following configuration to null:
    # securityContext: null
    securityContext:
      runAsNonRoot: true
      seccompProfile:
        type: RuntimeDefault
    # ContainerSecurityContext to be added to containers
    # To disable set the following configuration to null:
    # containerSecurityContext: null
    containerSecurityContext:
      allowPrivilegeEscalation: false
      readOnlyRootFilesystem: true
      runAsUser: 1001
      runAsGroup: 2001
      capabilities:
        drop: ["ALL"]
    # Pod Disruption Budget configuration
    # Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
    podDisruptionBudget:
      enabled: false
      minAvailable: 0
      maxUnavailable: 0
    networkPolicy:
      enabled: false
      ingressDenyAll: false
      # Raw network policy spec that overrides predefined spec
      # Example:
      # spec:
      #   egress:
      #     - ports:
      #         - port: 123
      spec: {}
  # Common labels & annotations shared across all deployed resources
  labels: {}
  annotations: {}
  # Common priority class used for all deployed resources
  priorityClassName: null
  settings:
    ## Global dashboard settings
    global:
    #   # Cluster name that appears in the browser window title if it is set
    #   clusterName: ""
    #   # Max number of items that can be displayed on each list page
    #   itemsPerPage: 10
    #   # Max number of labels that are displayed by default on most views.
    #   labelsLimit: 3
    #   # Number of seconds between every auto-refresh of logs
    #   logsAutoRefreshTimeInterval: 5
    #   # Number of seconds between every auto-refresh of every resource. Set 0 to disable
    #   resourceAutoRefreshTimeInterval: 10
    #   # Hide all access denied warnings in the notification panel
    #   disableAccessDeniedNotifications: false
    #   # Hide all namespaces option in namespace selection dropdown to avoid accidental selection in large clusters thus preventing OOM errors
    #   hideAllNamespaces: false
    #   # Namespace that should be selected by default after logging in.
    #   defaultNamespace: default
    #   # List of namespaces that should be presented to user without namespace list privileges.
    #   namespaceFallbackList:
    #     - default
    ## Pinned resources that will be displayed in dashboard's menu
    pinnedResources: []
    # - kind: customresourcedefinition
    #   # Fully qualified name of a CRD
    #   name: prometheus.monitoring.coreos.com
    #   # Display name
    #   displayName: Prometheus
    #   # Is this CRD namespaced?
    #   namespaced: true
  ingress:
    enabled: false
    hosts:
      # Keep 'localhost' host only if you want to access Dashboard using 'kubectl port-forward ...' on:
      # https://localhost:8443
      # - localhost
      - dashboard
      # - kubernetes.dashboard.domain.com
    ingressClassName: internal-nginx
    # Use only if your ingress controllers support default ingress classes.
    # If set to true ingressClassName will be ignored and not added to the Ingress resources.
    # It should fall back to using IngressClass marked as the default.
    useDefaultIngressClass: false
    # This will append our Ingress with annotations required by our default configuration.
    # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    # nginx.ingress.kubernetes.io/ssl-passthrough: "true"
    # nginx.ingress.kubernetes.io/ssl-redirect: "true"
    useDefaultAnnotations: true
    pathType: ImplementationSpecific
    # If path is not the default (/), rewrite-target annotation will be added to the Ingress.
    # It allows serving Kubernetes Dashboard on a sub-path. Make sure that the configured path
    # does not conflict with gateway route configuration.
    path: /
    issuer:
      name: selfsigned
      # Scope determines what kind of issuer annotation will be used on ingress resource
      # - default - adds 'cert-manager.io/issuer'
      # - cluster - adds 'cert-manager.io/cluster-issuer'
      # - disabled - disables cert-manager annotations
      scope: default
    tls:
      enabled: true
      # If provided it will override autogenerated secret name
      secretName: ""
    labels: {}
    annotations: {}
  # Use the following toleration if Dashboard can be deployed on a tainted control-plane nodes
  # - key: node-role.kubernetes.io/control-plane
  #   effect: NoSchedule
  tolerations: []
  affinity: {}
# Auth container deployment configuration
auth:
  role: auth
  image:
    repository: docker.io/kubernetesui/dashboard-auth
    tag: 1.2.4
  scaling:
    replicas: 1
    revisionHistoryLimit: 10
  service:
    type: ClusterIP
    extraSpec: ~
  containers:
    ports:
      - name: auth
        containerPort: 8000
        protocol: TCP
    args: []
    env: []
    volumeMounts:
      - mountPath: /tmp
        name: tmp-volume
    # TODO: Validate configuration
    resources:
      requests:
        cpu: 100m
        memory: 200Mi
      limits:
        cpu: 250m
        memory: 400Mi
  automountServiceAccountToken: true
  volumes:
    # Create on-disk volume to store exec logs (required)
    - name: tmp-volume
      emptyDir: {}
  nodeSelector: {}
  # Labels & annotations for Auth related resources
  labels: {}
  annotations: {}
  serviceLabels: {}
  serviceAnnotations: {}
# API deployment configuration
api:
  role: api
  image:
    repository: docker.io/kubernetesui/dashboard-api
    tag: 1.12.0
  scaling:
    replicas: 1
    revisionHistoryLimit: 10
  service:
    type: ClusterIP
    extraSpec: ~
  containers:
    ports:
      - name: api
        containerPort: 8000
        protocol: TCP
    # Additional container arguments
    # Full list of arguments: https://github.com/kubernetes/dashboard/blob/master/docs/common/arguments.md
    # args:
    #   - --system-banner="Welcome to the Kubernetes Dashboard"
    args: []
    # Additional container environment variables
    # env:
    #   - name: SOME_VAR
    #     value: 'some value'
    env: []
    # Additional volume mounts
    #   - mountPath: /kubeconfig
    #     name: dashboard-kubeconfig
    #     readOnly: true
    volumeMounts:
      # Create volume mount to store exec logs (required)
      - mountPath: /tmp
        name: tmp-volume
    # TODO: Validate configuration
    resources:
      requests:
        cpu: 100m
        memory: 200Mi
      limits:
        cpu: 250m
        memory: 400Mi
  automountServiceAccountToken: true
  # Additional volumes
  #   - name: dashboard-kubeconfig
  #     secret:
  #       defaultMode: 420
  #       secretName: dashboard-kubeconfig
  volumes:
    # Create on-disk volume to store exec logs (required)
    - name: tmp-volume
      emptyDir: {}
  nodeSelector: {}
  # Labels & annotations for API related resources
  labels: {}
  annotations: {}
  serviceLabels: {}
  serviceAnnotations: {}
# WEB UI deployment configuration
web:
  role: web
  image:
    repository: docker.io/kubernetesui/dashboard-web
    tag: 1.6.2
  scaling:
    replicas: 1
    revisionHistoryLimit: 10
  service:
    type: ClusterIP
    extraSpec: ~
  containers:
    ports:
      - name: web
        containerPort: 8000
        protocol: TCP
    # Additional container arguments
    # Full list of arguments: https://github.com/kubernetes/dashboard/blob/master/docs/common/arguments.md
    # args:
    #   - --system-banner="Welcome to the Kubernetes Dashboard"
    args: []
    # Additional container environment variables
    # env:
    #   - name: SOME_VAR
    #     value: 'some value'
    env: []
    # Additional volume mounts
    #   - mountPath: /kubeconfig
    #     name: dashboard-kubeconfig
    #     readOnly: true
    volumeMounts:
      # Create volume mount to store logs (required)
      - mountPath: /tmp
        name: tmp-volume
    # TODO: Validate configuration
    resources:
      requests:
        cpu: 100m
        memory: 200Mi
      limits:
        cpu: 250m
        memory: 400Mi
  automountServiceAccountToken: true
  # Additional volumes
  #   - name: dashboard-kubeconfig
  #     secret:
  #       defaultMode: 420
  #       secretName: dashboard-kubeconfig
  volumes:
    # Create on-disk volume to store exec logs (required)
    - name: tmp-volume
      emptyDir: {}
  nodeSelector: {}
  # Labels & annotations for WEB UI related resources
  labels: {}
  annotations: {}
  serviceLabels: {}
  serviceAnnotations: {}
### Metrics Scraper
### Container to scrape, store, and retrieve a window of time from the Metrics Server.
### refs: https://github.com/kubernetes/dashboard/tree/master/modules/metrics-scraper
metricsScraper:
  enabled: true
  role: metrics-scraper
  image:
    repository: docker.io/kubernetesui/dashboard-metrics-scraper
    tag: 1.2.2
  scaling:
    replicas: 1
    revisionHistoryLimit: 10
  service:
    type: ClusterIP
    extraSpec: ~
  containers:
    ports:
      - containerPort: 8000
        protocol: TCP
    args: []
    # Additional container environment variables
    # env:
    #   - name: SOME_VAR
    #     value: 'some value'
    env: []
    # Additional volume mounts
    #   - mountPath: /kubeconfig
    #     name: dashboard-kubeconfig
    #     readOnly: true
    volumeMounts:
      # Create volume mount to store logs (required)
      - mountPath: /tmp
        name: tmp-volume
    # TODO: Validate configuration
    resources:
      requests:
        cpu: 100m
        memory: 200Mi
      limits:
        cpu: 250m
        memory: 400Mi
    livenessProbe:
      httpGet:
        scheme: HTTP
        path: /
        port: 8000
      initialDelaySeconds: 30
      timeoutSeconds: 30
  automountServiceAccountToken: true
  # Additional volumes
  #   - name: dashboard-kubeconfig
  #     secret:
  #       defaultMode: 420
  #       secretName: dashboard-kubeconfig
  volumes:
    - name: tmp-volume
      emptyDir: {}
  nodeSelector: {}
  # Labels & annotations for Metrics Scraper related resources
  labels: {}
  annotations: {}
  serviceLabels: {}
  serviceAnnotations: {}
## Optional Metrics Server sub-chart configuration
## Enable this if you don't already have metrics-server enabled on your cluster and
## want to use it with dashboard metrics-scraper
## refs:
##  - https://github.com/kubernetes-sigs/metrics-server
##  - https://github.com/kubernetes-sigs/metrics-server/tree/master/charts/metrics-server
metrics-server:
  enabled: false
  args:
    - --kubelet-preferred-address-types=InternalIP
    - --kubelet-insecure-tls
## Required Kong sub-chart with DBless configuration to act as a gateway
## for our all containers.
kong:
  enabled: true
  ## Configuration reference: https://docs.konghq.com/gateway/3.6.x/reference/configuration
  env:
    dns_order: LAST,A,CNAME,AAAA,SRV
    plugins: 'off'
    nginx_worker_processes: 1
  ingressController:
    enabled: false
  manager:
    enabled: false
  dblessConfig:
    configMap: kong-dbless-config
  proxy:
    type: ClusterIP
    http:
      enabled: false
## Optional Cert Manager sub-chart configuration
## Enable this if you don't already have cert-manager enabled on your cluster.
cert-manager:
  enabled: false
  installCRDs: true
## Optional Nginx Ingress sub-chart configuration
## Enable this if you don't already have nginx-ingress enabled on your cluster.
nginx:
  enabled: false
  controller:
    electionID: ingress-controller-leader
    ingressClassResource:
      name: internal-nginx
      default: false
      controllerValue: k8s.io/internal-ingress-nginx
    service:
      type: ClusterIP
## Extra configurations:
## - manifests
## - predefined roles
## - prometheus
## - etc...
extras:
  # Extra Kubernetes manifests to be deployed
  # manifests:
  #   - apiVersion: v1
  #     kind: ConfigMap
  #     metadata:
  #       name: additional-configmap
  #     data:
  #       mykey: myvalue
  manifests: []
  serviceMonitor:
    # Whether to create a Prometheus Operator service monitor.
    enabled: false
    # Here labels can be added to the serviceMonitor
    labels: {}
    # Here annotations can be added to the serviceMonitor
    annotations: {}
    # metrics.serviceMonitor.metricRelabelings Specify Metric Relabelings to add to the scrape endpoint
    # ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
    metricRelabelings: []
    # metrics.serviceMonitor.relabelings [array] Prometheus relabeling rules
    relabelings: []
    # ServiceMonitor connection scheme. Defaults to HTTPS.
    scheme: https
    # ServiceMonitor connection tlsConfig. Defaults to {insecureSkipVerify:true}.
    tlsConfig:
      insecureSkipVerify: true

1
k3s/install.sh Executable file
View File

@@ -0,0 +1 @@
#!/bin/sh
# Install k3s using the official installer (piped to sh; -f makes curl fail
# on HTTP errors so an error page is never executed).
curl -sfL https://get.k3s.io | sh -

1
k3s/installHelm.sh Executable file
View File

@@ -0,0 +1 @@
#!/bin/sh
# Install Helm 3 via the official installer script.
# Fixed: added -fsSL so that an HTTP error response (404/500 page) fails the
# download instead of being piped into bash and executed.
curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash

120
k3s/k8sUser/addUser.sh Executable file
View File

@@ -0,0 +1,120 @@
#!/bin/bash
# Provision Kubernetes access for an existing Linux user: creates a
# ServiceAccount bound to cluster-admin, mints a static (non-expiring)
# service-account token and writes a ready-to-use kubeconfig into the
# user's home directory.
# Usage: sudo ./addUser.sh <linux-user>
# NOTE(review): the account is bound to cluster-admin — full cluster access.
TARGET_USER="$1"
if [ -z "$TARGET_USER" ]; then
    echo "❌ Bitte gib den Namen eines Linux-Users als Argument an."
    echo " Beispiel: sudo ./setup-k8s-user-kubeconfig.sh dashboarduser"
    exit 1
fi
USERNAME="$TARGET_USER"
NAMESPACE="kube-system"
SECRET_NAME="${USERNAME}-token"
CONFIG_PATH="/home/${TARGET_USER}/.kube/config"
BASHRC_PATH="/home/${TARGET_USER}/.bashrc"

# Abort if the Linux user does not exist.
function check_user_exists() {
    id "$1" &>/dev/null || {
        echo "❌ Linux-User '$1' existiert nicht!"
        exit 1
    }
}

# Idempotently create the ServiceAccount and its cluster-admin binding
# (dry-run | apply, so re-runs do not fail on existing resources).
function create_k8s_resources() {
    echo "🔧 Erstelle ServiceAccount und ClusterRoleBinding für '$USERNAME'..."
    kubectl create serviceaccount "${USERNAME}" -n "${NAMESPACE}" --dry-run=client -o yaml | kubectl apply -f -
    kubectl create clusterrolebinding "${USERNAME}-binding" \
        --clusterrole=cluster-admin \
        --serviceaccount="${NAMESPACE}:${USERNAME}" \
        --dry-run=client -o yaml | kubectl apply -f -
}

# Create a service-account-token Secret and wait (max ~10s) for the token
# controller to populate it; sets TOKEN or exits with an error.
function create_static_token_secret() {
    echo "🔐 Erstelle statisches Token (Secret) für '$USERNAME'..."
    # Prüfen ob Secret schon existiert
    if ! kubectl get secret "${SECRET_NAME}" -n "${NAMESPACE}" &>/dev/null; then
        # Fixed: the heredoc YAML had lost its indentation, which made
        # name/namespace/annotations top-level keys instead of metadata
        # children — kubectl apply would have rejected the document.
        cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Secret
metadata:
  name: ${SECRET_NAME}
  namespace: ${NAMESPACE}
  annotations:
    kubernetes.io/service-account.name: "${USERNAME}"
type: kubernetes.io/service-account-token
EOF
    fi
    echo "⏳ Warte, bis Token im Secret verfügbar ist..."
    # Poll once per second, up to 10 times, for .data.token to appear.
    for i in {1..10}; do
        TOKEN=$(kubectl get secret "${SECRET_NAME}" -n "${NAMESPACE}" -o jsonpath="{.data.token}" | base64 -d 2>/dev/null)
        [ -n "$TOKEN" ] && break
        sleep 1
    done
    if [ -z "$TOKEN" ]; then
        echo "❌ Token konnte nicht aus dem Secret gelesen werden."
        exit 1
    fi
}

# Read API server URL and CA bundle from the current (admin) kubeconfig.
function get_cluster_info() {
    echo "🌐 Lese Cluster-Info..."
    SERVER=$(kubectl config view --raw -o jsonpath='{.clusters[0].cluster.server}')
    CA=$(kubectl config view --raw -o jsonpath='{.clusters[0].cluster.certificate-authority-data}')
}

# Write the kubeconfig for the target user and hand ownership over.
function write_kubeconfig() {
    echo "📝 Schreibe Kubeconfig nach ${CONFIG_PATH}..."
    sudo -u "${TARGET_USER}" mkdir -p "/home/${TARGET_USER}/.kube"
    cat <<EOF | sudo tee "${CONFIG_PATH}" > /dev/null
apiVersion: v1
kind: Config
clusters:
  - cluster:
      certificate-authority-data: ${CA}
      server: ${SERVER}
    name: k3s
contexts:
  - context:
      cluster: k3s
      user: ${USERNAME}
    name: ${USERNAME}@k3s
current-context: ${USERNAME}@k3s
users:
  - name: ${USERNAME}
    user:
      token: ${TOKEN}
EOF
    sudo chown "${TARGET_USER}:${TARGET_USER}" "${CONFIG_PATH}"
    echo "✅ Kubeconfig für ${TARGET_USER} mit statischem Token erstellt."
}

# Append KUBECONFIG export and bash completion once; skipped when the bashrc
# already mentions kubectl anywhere.
function add_kubectl_hint_to_bashrc() {
    if ! sudo grep -q 'kubectl' "${BASHRC_PATH}" 2>/dev/null; then
        echo "🧠 Füge kubectl-Alias zur bashrc hinzu..."
        echo "" | sudo tee -a "${BASHRC_PATH}" > /dev/null
        echo "# kubectl completion & config (automatisch hinzugefügt)" | sudo tee -a "${BASHRC_PATH}" > /dev/null
        echo "export KUBECONFIG=\$HOME/.kube/config" | sudo tee -a "${BASHRC_PATH}" > /dev/null
        echo "source <(kubectl completion bash)" | sudo tee -a "${BASHRC_PATH}" > /dev/null
    fi
}

# === Ausführung ===
check_user_exists "${TARGET_USER}"
create_k8s_resources
create_static_token_secret
get_cluster_info
write_kubeconfig
add_kubectl_hint_to_bashrc
echo "🚀 Alles erledigt für Benutzer '${TARGET_USER}'!"
echo "💡 Melde dich mit dem Token im Kubernetes Dashboard an, oder nutze:"
echo " kubectl get pods -A"

14
k3s/nfs-pv/nfs-pv.yaml Normal file
View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv
spec:
  capacity:
    storage: 100Gi  # generous placeholder; capacity matters little for NFS
  accessModes:
    - ReadWriteMany  # required: several pods mount this share concurrently
  storageClassName: nfs
  persistentVolumeReclaimPolicy: Retain  # keeps the share if the claim is deleted
  nfs:
    path: /fastData/photos
    server: 192.168.178.132

1
k3s/photo/icloudpd/base64pw.sh Executable file
View File

@@ -0,0 +1 @@
#!/bin/sh
# Base64-encode a password for pasting into a Kubernetes Secret manifest.
# SECURITY: the previous version hard-coded the real iCloud password in this
# script, so it is permanently in git history — rotate that credential.
# The password is now passed as the first argument instead of being committed.
echo -n "${1:?usage: base64pw.sh <password>}" | base64

View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
  name: icloudpd-secret
  namespace: photoprism
type: Opaque
data:
  # SECURITY(review): base64 is an encoding, not encryption — this value is
  # the plaintext Apple password and it lives in git history (see
  # base64pw.sh). Rotate the credential and manage it out-of-band
  # (e.g. SealedSecrets/SOPS, or `kubectl create secret` at deploy time).
  apple_password: WjZ4M2g1eHk1Njk=

View File

@@ -0,0 +1,59 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: icloudpd
  namespace: photoprism
spec:
  replicas: 1
  selector:
    matchLabels:
      app: icloudpd
  template:
    metadata:
      labels:
        app: icloudpd
    spec:
      containers:
        - name: icloudpd
          # NOTE(review): mutable ':latest' tag — pin a version for
          # reproducible deployments.
          image: r3d454/dockericloudpd:latest
          env:
            - name: apple_id
              value: "Henry-Winkel@web.de"
            - name: apple_password
              valueFrom:
                secretKeyRef:
                  name: icloudpd-secret
                  key: apple_password
            # Downloads land inside the PhotoPrism originals directory on the
            # shared claim, where PhotoPrism's indexer picks them up.
            - name: download_path
              value: "/data/originals"
            - name: authentication_type
              value: "Web"
            - name: directory_permissions
              value: "777"
            - name: file_permissions
              value: "777"
            - name: set_exif_datetime
              value: "false"
            # NOTE(review): casing inconsistent with set_exif_datetime
            # ("false") — confirm the image accepts "False" before normalizing.
            - name: auto_delete
              value: "False"
          volumeMounts:
            - name: icloudpd-storage
              mountPath: /data/
      volumes:
        - name: icloudpd-storage
          persistentVolumeClaim:
            claimName: photoprism-storage
---
apiVersion: v1
kind: Service
metadata:
  name: icloudpd-service
  namespace: photoprism
spec:
  selector:
    app: icloudpd
  ports:
    - protocol: TCP
      port: 8080
      targetPort: 8080
  type: NodePort

View File

@@ -0,0 +1,13 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mariadb-pv
spec:
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain  # keeps the DB data if the claim is deleted
  storageClassName: local-path
  # NOTE(review): hostPath ties the database to whichever node has
  # /slowData/photoprismDB mounted — confirm this is a single-node setup or
  # that the pod is pinned to that node.
  hostPath:
    path: /slowData/photoprismDB

View File

@@ -0,0 +1,60 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mariadb
  namespace: photoprism
spec:
  storageClassName: local-path
  volumeName: mariadb-pv  # bind explicitly to the pre-created hostPath PV
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 50Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mariadb
  namespace: photoprism
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mariadb
  template:
    metadata:
      labels:
        app: mariadb
    spec:
      containers:
        - name: mariadb
          image: mariadb:10.11
          # SECURITY(review): DB credentials are hard-coded in the manifest;
          # move them into a Secret (secretKeyRef) like icloudpd does.
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: photoprism
            - name: MYSQL_DATABASE
              value: photoprism
            - name: MYSQL_USER
              value: photoprism
            - name: MYSQL_PASSWORD
              value: photoprism
          volumeMounts:
            - mountPath: /var/lib/mysql
              name: mariadb
      volumes:
        - name: mariadb
          persistentVolumeClaim:
            claimName: mariadb
---
apiVersion: v1
kind: Service
metadata:
  name: mariadb
  namespace: photoprism
spec:
  type: ClusterIP
  selector:
    app: mariadb
  ports:
    - port: 3306

View File

@@ -0,0 +1,98 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: photoprism-storage
  namespace: photoprism
spec:
  storageClassName: nfs
  volumeName: nfs-pv  # bind explicitly to the pre-created NFS PV
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 50Gi
---
# apiVersion: v1
# kind: PersistentVolumeClaim
# metadata:
#   name: photoprism-originals
#   namespace: photoprism
# spec:
#   storageClassName: nfs
#   volumeName: nfs-pv
#   accessModes:
#     - ReadWriteMany
#   resources:
#     requests:
#       storage: 100Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: photoprism
  namespace: photoprism
spec:
  replicas: 1
  selector:
    matchLabels:
      app: photoprism
  template:
    metadata:
      labels:
        app: photoprism
    spec:
      containers:
        - name: photoprism
          # NOTE(review): untagged image resolves to ':latest' — pin a release
          # for reproducible deployments.
          image: photoprism/photoprism
          env:
            - name: PHOTOPRISM_ADMIN_USER
              value: admin
            # SECURITY(review): admin and DB passwords are committed in
            # plaintext — move them to a Secret and rotate.
            - name: PHOTOPRISM_ADMIN_PASSWORD
              value: photoprism
            - name: PHOTOPRISM_AUTH_MODE
              value: password
            - name: PHOTOPRISM_SITE_URL
              value: https://photoprism.henryathome.home64.de
            - name: PHOTOPRISM_INDEX_WORKERS
              value: "3"
            - name: PHOTOPRISM_INDEX_SCHEDULE
              value: "@every 3h"
            # DB
            - name: PHOTOPRISM_DATABASE_DRIVER
              value: mysql
            - name: PHOTOPRISM_DATABASE_SERVER
              value: mariadb:3306
            - name: PHOTOPRISM_DATABASE_NAME
              value: photoprism
            - name: PHOTOPRISM_DATABASE_USER
              value: photoprism
            - name: PHOTOPRISM_DATABASE_PASSWORD
              value: photoprism
          volumeMounts:
            # Single shared claim mounted over the whole /photoprism tree
            # (covers both storage/ and originals/, where icloudpd downloads).
            - mountPath: /photoprism/
              name: photoprism-storage
            # - mountPath: /photoprism/storage
            #   name: photoprism-storage
            # - mountPath: /photoprism/originals
            #   name: photoprism-originals
      volumes:
        - name: photoprism-storage
          persistentVolumeClaim:
            claimName: photoprism-storage
        # - name: photoprism-originals
        #   persistentVolumeClaim:
        #     claimName: photoprism-originals
---
apiVersion: v1
kind: Service
metadata:
  name: photoprism
  namespace: photoprism
spec:
  type: NodePort
  selector:
    app: photoprism
  ports:
    - port: 2342

38
nfs/nfsClient2.sh Executable file
View File

@@ -0,0 +1,38 @@
#!/bin/bash
# Mount the NFS "fastData" share from the home server and optionally persist
# it in /etc/fstab. Defaults below can be overridden via environment variables
# (e.g. SERVER_IP=10.0.0.5 ./nfsClient2.sh); exporting one as an empty string
# triggers the interactive prompt instead.
# ==== Konfigurierbare Standardwerte ====
SERVER_IP="${SERVER_IP-192.168.178.132}"
REMOTE_PATH="${REMOTE_PATH-fastData}"
LOCAL_MOUNT="${LOCAL_MOUNT-/fastData}"
AUTO_MOUNT="${AUTO_MOUNT:-}"
# ==== Interaktive Abfrage, falls Variablen fehlen ====
# (Fixed: these prompts were dead code before — the variables above were
# assigned unconditionally, so the -z tests could never succeed.)
[[ -z "$SERVER_IP" ]] && read -rp "Server-IP-Adresse: " SERVER_IP
[[ -z "$REMOTE_PATH" ]] && read -rp "Remote NFS Pfad (z.B. /fastData): " REMOTE_PATH
[[ -z "$LOCAL_MOUNT" ]] && read -rp "Lokales Mount-Verzeichnis (z.B. /mnt/nfs): " LOCAL_MOUNT
[[ -z "$AUTO_MOUNT" ]] && read -rp "Automatisch beim Booten mounten? (ja/nein): " AUTO_MOUNT
# ==== Verzeichnis vorbereiten ====
sudo mkdir -p "$LOCAL_MOUNT"
# Mount durchführen
echo "Mounten von $SERVER_IP:$REMOTE_PATH nach $LOCAL_MOUNT ..."
if ! sudo mount -t nfs "$SERVER_IP:$REMOTE_PATH" "$LOCAL_MOUNT"; then
    echo "❌ Fehler beim Mounten!"
    exit 1
fi
# Berechtigungen setzen, damit jeder schreiben darf
# NOTE(review): chmod 777 gives every local user full write access to the
# share — intended here, but confirm that is acceptable.
sudo chmod 777 "$LOCAL_MOUNT"
echo "✅ Erfolgreich gemountet. Alle Benutzer haben vollen Zugriff auf $LOCAL_MOUNT"
# ==== Automatisches Mounten einrichten ====
if [[ "$AUTO_MOUNT" == "ja" ]]; then
    echo "Füge Mount zur /etc/fstab hinzu..."
    LINE="$SERVER_IP:$REMOTE_PATH $LOCAL_MOUNT nfs defaults,_netdev 0 0"
    # Append only if the exact line is not already present (idempotent).
    grep -qxF "$LINE" /etc/fstab || echo "$LINE" | sudo tee -a /etc/fstab
    echo "✅ Automatisches Mounten aktiviert."
fi

38
nfs/nfsClient2SlowData.sh Executable file
View File

@@ -0,0 +1,38 @@
#!/bin/bash
# ==== Konfigurierbare Standardwerte ====
SERVER_IP="192.168.178.132"
REMOTE_PATH="slowData"
LOCAL_MOUNT="/slowData"
AUTO_MOUNT="${AUTO_MOUNT:-}"
# ==== Interaktive Abfrage, falls Variablen fehlen ====
[[ -z "$SERVER_IP" ]] && read -rp "Server-IP-Adresse: " SERVER_IP
[[ -z "$REMOTE_PATH" ]] && read -rp "Remote NFS Pfad (z.B. /fastData): " REMOTE_PATH
[[ -z "$LOCAL_MOUNT" ]] && read -rp "Lokales Mount-Verzeichnis (z.B. /mnt/nfs): " LOCAL_MOUNT
[[ -z "$AUTO_MOUNT" ]] && read -rp "Automatisch beim Booten mounten? (ja/nein): " AUTO_MOUNT
# ==== Verzeichnis vorbereiten ====
sudo mkdir -p "$LOCAL_MOUNT"
# Mount durchführen
echo "Mounten von $SERVER_IP:$REMOTE_PATH nach $LOCAL_MOUNT ..."
sudo mount -t nfs "$SERVER_IP:$REMOTE_PATH" "$LOCAL_MOUNT"
if [[ $? -ne 0 ]]; then
echo "❌ Fehler beim Mounten!"
exit 1
fi
# Berechtigungen setzen, damit jeder schreiben darf
sudo chmod 777 "$LOCAL_MOUNT"
echo "✅ Erfolgreich gemountet. Alle Benutzer haben vollen Zugriff auf $LOCAL_MOUNT"
# ==== Automatisches Mounten einrichten ====
if [[ "$AUTO_MOUNT" == "ja" ]]; then
echo "Füge Mount zur /etc/fstab hinzu..."
LINE="$SERVER_IP:$REMOTE_PATH $LOCAL_MOUNT nfs defaults,_netdev 0 0"
grep -qxF "$LINE" /etc/fstab || echo "$LINE" | sudo tee -a /etc/fstab
echo "✅ Automatisches Mounten aktiviert."
fi