
-------------------------------------
1) Describe 'Deployments' resources
-------------------------------------
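
Note: each block below is standard "kubectl describe" output. A dump like this
can be reproduced with something along these lines (cluster access is assumed;
on a TrueNAS SCALE / k3s host the client may be invoked as "k3s kubectl"):

    # describe every Deployment in every namespace
    kubectl describe deployments --all-namespaces

    # or scope to a single object
    kubectl -n kube-system describe deployment coredns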

Name:                   coredns
Namespace:              kube-system
CreationTimestamp:      Sun, 08 Oct 2023 00:18:15 -0400
Labels:                 k8s-app=kube-dns
                        kubernetes.io/name=CoreDNS
                        objectset.rio.cattle.io/hash=bce283298811743a0386ab510f2f67ef74240c57
Annotations:            deployment.kubernetes.io/revision: 2
                        objectset.rio.cattle.io/applied:
                          H4sIAAAAAAAA/6xVQW/bOBP9Kx/mLMVW0jaugO/QtbPboq3XqJNeCqOgqZHFNcXhkiMnRqD/vhjJduw2TdrFniyTb4ZvHucN70F58xlDNOQgB+V9HGwySGBtXAE5TNBb2tboGBKokV...
                        objectset.rio.cattle.io/id: 
                        objectset.rio.cattle.io/owner-gvk: k3s.cattle.io/v1, Kind=Addon
                        objectset.rio.cattle.io/owner-name: coredns
                        objectset.rio.cattle.io/owner-namespace: kube-system
Selector:               k8s-app=kube-dns
Replicas:               1 desired | 1 updated | 1 total | 1 available | 0 unavailable
StrategyType:           RollingUpdate
MinReadySeconds:        0
RollingUpdateStrategy:  1 max unavailable, 25% max surge
Pod Template:
  Labels:           k8s-app=kube-dns
  Service Account:  coredns
  Containers:
   coredns:
    Image:       rancher/mirrored-coredns-coredns:1.10.1
    Ports:       53/UDP, 53/TCP, 9153/TCP
    Host Ports:  0/UDP, 0/TCP, 0/TCP
    Args:
      -conf
      /etc/coredns/Corefile
    Limits:
      memory:  170Mi
    Requests:
      cpu:        100m
      memory:     70Mi
    Liveness:     http-get http://:8080/health delay=60s timeout=1s period=10s #success=1 #failure=3
    Readiness:    http-get http://:8181/ready delay=0s timeout=1s period=2s #success=1 #failure=3
    Environment:  <none>
    Mounts:
      /etc/coredns from config-volume (ro)
      /etc/coredns/custom from custom-config-volume (ro)
  Volumes:
   config-volume:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      coredns
    Optional:  false
   custom-config-volume:
    Type:                       ConfigMap (a volume populated by a ConfigMap)
    Name:                       coredns-custom
    Optional:                   true
  Topology Spread Constraints:  kubernetes.io/hostname:DoNotSchedule when max skew 1 is exceeded for selector k8s-app=kube-dns
  Priority Class Name:          system-cluster-critical
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Available      True    MinimumReplicasAvailable
  Progressing    True    NewReplicaSetAvailable
OldReplicaSets:  <none>
NewReplicaSet:   coredns-59b4f5bbd5 (1/1 replicas created)
Events:          <none>
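
Note: the strategy line above mixes an absolute count with a percentage:
maxUnavailable is pinned to 1 pod while maxSurge keeps the 25% default. In
apps/v1 Deployment YAML that combination looks roughly like this (values read
off the output above):

    strategy:
      type: RollingUpdate
      rollingUpdate:
        maxUnavailable: 1    # absolute pod count
        maxSurge: 25%        # percentage of desired replicas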


Name:                   csi-smb-controller
Namespace:              kube-system
CreationTimestamp:      Tue, 19 Dec 2023 22:45:00 -0500
Labels:                 objectset.rio.cattle.io/hash=dcff83f97210cf176ea098fd305c31dad9e74663
Annotations:            deployment.kubernetes.io/revision: 1
                        objectset.rio.cattle.io/applied:
                          H4sIAAAAAAAA/8RW32/bNhD+V4ZDHzpAcqw4SRsBfijibAu2ekYdbA+FMdDU2bqZIjnypMYL9L8PpGRHwZIu7TAMQQD6jvfr+z6ddA/C0i/oPBkNOQhr/UmTQQI70gXkMEOrzL5CzZ...
                        objectset.rio.cattle.io/id: 
                        objectset.rio.cattle.io/owner-gvk: k3s.cattle.io/v1, Kind=Addon
                        objectset.rio.cattle.io/owner-name: smb-csi
                        objectset.rio.cattle.io/owner-namespace: kube-system
Selector:               app=csi-smb-controller
Replicas:               1 desired | 1 updated | 1 total | 1 available | 0 unavailable
StrategyType:           RollingUpdate
MinReadySeconds:        0
RollingUpdateStrategy:  25% max unavailable, 25% max surge
Pod Template:
  Labels:           app=csi-smb-controller
  Service Account:  csi-smb-controller-sa
  Containers:
   csi-provisioner:
    Image:      registry.k8s.io/sig-storage/csi-provisioner:v3.5.0
    Port:       <none>
    Host Port:  <none>
    Args:
      -v=2
      --csi-address=$(ADDRESS)
      --leader-election
      --leader-election-namespace=kube-system
      --extra-create-metadata=true
    Limits:
      cpu:     1
      memory:  300Mi
    Requests:
      cpu:     10m
      memory:  20Mi
    Environment:
      ADDRESS:  /csi/csi.sock
    Mounts:
      /csi from socket-dir (rw)
   liveness-probe:
    Image:      registry.k8s.io/sig-storage/livenessprobe:v2.10.0
    Port:       <none>
    Host Port:  <none>
    Args:
      --csi-address=/csi/csi.sock
      --probe-timeout=3s
      --health-port=29642
      --v=2
    Limits:
      cpu:     1
      memory:  100Mi
    Requests:
      cpu:        10m
      memory:     20Mi
    Environment:  <none>
    Mounts:
      /csi from socket-dir (rw)
   smb:
    Image:       registry.k8s.io/sig-storage/smbplugin:v1.11.0
    Ports:       29642/TCP, 29644/TCP
    Host Ports:  29642/TCP, 29644/TCP
    Args:
      --v=5
      --endpoint=$(CSI_ENDPOINT)
      --metrics-address=0.0.0.0:29644
    Limits:
      memory:  200Mi
    Requests:
      cpu:     10m
      memory:  20Mi
    Liveness:  http-get http://:healthz/healthz delay=30s timeout=10s period=30s #success=1 #failure=5
    Environment:
      CSI_ENDPOINT:  unix:///csi/csi.sock
    Mounts:
      /csi from socket-dir (rw)
  Volumes:
   socket-dir:
    Type:               EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:             
    SizeLimit:          <unset>
  Priority Class Name:  system-cluster-critical
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Progressing    True    NewReplicaSetAvailable
  Available      True    MinimumReplicasAvailable
OldReplicaSets:  <none>
NewReplicaSet:   csi-smb-controller-7fbbb8fb6f (1/1 replicas created)
Events:          <none>
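
Note: the three containers in this pod coordinate over a Unix socket on the
shared socket-dir EmptyDir; ADDRESS and CSI_ENDPOINT both resolve to
/csi/csi.sock. A trimmed sketch of that wiring in pod-spec YAML (names taken
from the output above, all other detail omitted):

    volumes:
      - name: socket-dir
        emptyDir: {}
    containers:
      - name: csi-provisioner
        env:
          - name: ADDRESS
            value: /csi/csi.sock
        volumeMounts:
          - name: socket-dir
            mountPath: /csi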


Name:                   snapshot-controller
Namespace:              kube-system
CreationTimestamp:      Tue, 19 Dec 2023 22:45:00 -0500
Labels:                 objectset.rio.cattle.io/hash=e20aeb27d825538dc5fa9f4d82c2fe2058f8374c
Annotations:            deployment.kubernetes.io/revision: 1
                        objectset.rio.cattle.io/applied:
                          H4sIAAAAAAAA/7RUzW7bOBB+lcWcJceW442XgA/BZk/bBkWM9BLkMKZGMmuKZDgjNYKhdy8oO60D5A8FehM5w+9nPlJ7wGC+UmTjHSjAEPism0EGO+NKUHBFwfq+ISeQQUOCJQqC2g...
                        objectset.rio.cattle.io/id: 
                        objectset.rio.cattle.io/owner-gvk: k3s.cattle.io/v1, Kind=Addon
                        objectset.rio.cattle.io/owner-name: nfs-csi
                        objectset.rio.cattle.io/owner-namespace: kube-system
Selector:               app=snapshot-controller
Replicas:               2 desired | 2 updated | 2 total | 2 available | 0 unavailable
StrategyType:           RollingUpdate
MinReadySeconds:        15
RollingUpdateStrategy:  1 max unavailable, 0 max surge
Pod Template:
  Labels:           app=snapshot-controller
  Service Account:  snapshot-controller
  Containers:
   snapshot-controller:
    Image:      registry.k8s.io/sig-storage/snapshot-controller:v6.2.2
    Port:       <none>
    Host Port:  <none>
    Args:
      --v=2
      --leader-election=true
      --leader-election-namespace=kube-system
    Limits:
      memory:  300Mi
    Requests:
      cpu:              10m
      memory:           20Mi
    Environment:        <none>
    Mounts:             <none>
  Volumes:              <none>
  Priority Class Name:  system-cluster-critical
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Progressing    True    NewReplicaSetAvailable
  Available      True    MinimumReplicasAvailable
OldReplicaSets:  <none>
NewReplicaSet:   snapshot-controller-546868dfb4 (2/2 replicas created)
Events:          <none>
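
Note: with 2 replicas, MinReadySeconds=15 and a "1 max unavailable, 0 max
surge" strategy, a rollout replaces one leader-elected pod at a time and only
counts the new pod as available once it has been Ready for 15 seconds.
Progress can be watched with:

    kubectl -n kube-system rollout status deployment/snapshot-controller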


Name:                   csi-nfs-controller
Namespace:              kube-system
CreationTimestamp:      Tue, 19 Dec 2023 22:45:00 -0500
Labels:                 objectset.rio.cattle.io/hash=e20aeb27d825538dc5fa9f4d82c2fe2058f8374c
Annotations:            deployment.kubernetes.io/revision: 1
                        objectset.rio.cattle.io/applied:
                          H4sIAAAAAAAA/8RW4W/bthP9V3449EMLSLbjxG0qwB/yi9Mt2OoadbFhKIKCJk/2zRTJkZQaL9D/PhwlJ87WpEGBoh8MyDze8d177yjdgHD0G/pA1kABwrkwbI4ggy0ZBQXM0Gm7q9...
                        objectset.rio.cattle.io/id: 
                        objectset.rio.cattle.io/owner-gvk: k3s.cattle.io/v1, Kind=Addon
                        objectset.rio.cattle.io/owner-name: nfs-csi
                        objectset.rio.cattle.io/owner-namespace: kube-system
Selector:               app=csi-nfs-controller
Replicas:               1 desired | 1 updated | 1 total | 1 available | 0 unavailable
StrategyType:           RollingUpdate
MinReadySeconds:        0
RollingUpdateStrategy:  25% max unavailable, 25% max surge
Pod Template:
  Labels:           app=csi-nfs-controller
  Service Account:  csi-nfs-controller-sa
  Containers:
   csi-provisioner:
    Image:      registry.k8s.io/sig-storage/csi-provisioner:v3.5.0
    Port:       <none>
    Host Port:  <none>
    Args:
      -v=2
      --csi-address=$(ADDRESS)
      --leader-election
      --leader-election-namespace=kube-system
      --extra-create-metadata=true
    Limits:
      memory:  400Mi
    Requests:
      cpu:     10m
      memory:  20Mi
    Environment:
      ADDRESS:  /csi/csi.sock
    Mounts:
      /csi from socket-dir (rw)
   csi-snapshotter:
    Image:      registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2
    Port:       <none>
    Host Port:  <none>
    Args:
      --v=2
      --csi-address=$(ADDRESS)
      --leader-election-namespace=kube-system
      --leader-election
    Limits:
      memory:  200Mi
    Requests:
      cpu:     10m
      memory:  20Mi
    Environment:
      ADDRESS:  /csi/csi.sock
    Mounts:
      /csi from socket-dir (rw)
   liveness-probe:
    Image:      registry.k8s.io/sig-storage/livenessprobe:v2.10.0
    Port:       <none>
    Host Port:  <none>
    Args:
      --csi-address=/csi/csi.sock
      --probe-timeout=3s
      --health-port=29652
      --v=2
    Limits:
      memory:  100Mi
    Requests:
      cpu:        10m
      memory:     20Mi
    Environment:  <none>
    Mounts:
      /csi from socket-dir (rw)
   nfs:
    Image:      registry.k8s.io/sig-storage/nfsplugin:v4.4.0
    Port:       29652/TCP
    Host Port:  29652/TCP
    Args:
      -v=5
      --nodeid=$(NODE_ID)
      --endpoint=$(CSI_ENDPOINT)
    Limits:
      memory:  200Mi
    Requests:
      cpu:     10m
      memory:  20Mi
    Liveness:  http-get http://:healthz/healthz delay=30s timeout=10s period=30s #success=1 #failure=5
    Environment:
      NODE_ID:        (v1:spec.nodeName)
      CSI_ENDPOINT:  unix:///csi/csi.sock
    Mounts:
      /csi from socket-dir (rw)
      /var/lib/kubelet/pods from pods-mount-dir (rw)
  Volumes:
   pods-mount-dir:
    Type:          HostPath (bare host directory volume)
    Path:          /var/lib/kubelet/pods
    HostPathType:  Directory
   socket-dir:
    Type:               EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:             
    SizeLimit:          <unset>
  Priority Class Name:  system-cluster-critical
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Progressing    True    NewReplicaSetAvailable
  Available      True    MinimumReplicasAvailable
OldReplicaSets:  <none>
NewReplicaSet:   csi-nfs-controller-7b74694749 (1/1 replicas created)
Events:          <none>
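
Note: the "(v1:spec.nodeName)" rendering under Environment is how kubectl
describe prints a downward-API reference. In container YAML the same variable
is declared roughly like this:

    env:
      - name: NODE_ID
        valueFrom:
          fieldRef:
            fieldPath: spec.nodeName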


Name:               openspeedtest
Namespace:          ix-openspeedtest
CreationTimestamp:  Sun, 08 Oct 2023 02:23:49 -0400
Labels:             app=openspeedtest-6.0.10
                    app.kubernetes.io/instance=openspeedtest
                    app.kubernetes.io/managed-by=Helm
                    app.kubernetes.io/name=openspeedtest
                    app.kubernetes.io/version=latest
                    helm-revision=15
                    helm.sh/chart=openspeedtest-6.0.10
                    release=openspeedtest
Annotations:        deployment.kubernetes.io/revision: 12
                    meta.helm.sh/release-name: openspeedtest
                    meta.helm.sh/release-namespace: ix-openspeedtest
Selector:           app.kubernetes.io/instance=openspeedtest,app.kubernetes.io/name=openspeedtest,pod.name=main
Replicas:           1 desired | 1 updated | 1 total | 1 available | 0 unavailable
StrategyType:       Recreate
MinReadySeconds:    0
Pod Template:
  Labels:           app=openspeedtest-6.0.10
                    app.kubernetes.io/instance=openspeedtest
                    app.kubernetes.io/managed-by=Helm
                    app.kubernetes.io/name=openspeedtest
                    app.kubernetes.io/version=latest
                    helm-revision=15
                    helm.sh/chart=openspeedtest-6.0.10
                    pod.name=main
                    release=openspeedtest
  Annotations:      rollme: itHyW
  Service Account:  default
  Containers:
   openspeedtest:
    Image:      openspeedtest/latest:latest@sha256:948800e96e8369171407bba9607a05fe2a6c742fa0a038869ffcbb3ab2585c82
    Port:       3000/TCP
    Host Port:  0/TCP
    Limits:
      cpu:     4
      memory:  8Gi
    Requests:
      cpu:      10m
      memory:   50Mi
    Liveness:   http-get http://:3000/ delay=10s timeout=5s period=10s #success=1 #failure=5
    Readiness:  http-get http://:3000/ delay=10s timeout=5s period=10s #success=2 #failure=5
    Startup:    tcp-socket :3000 delay=10s timeout=2s period=5s #success=1 #failure=60
    Environment:
      TZ:                      America/New_York
      UMASK:                   0022
      UMASK_SET:               0022
      NVIDIA_VISIBLE_DEVICES:  void
      PUID:                    568
      USER_ID:                 568
      UID:                     568
      PGID:                    568
      GROUP_ID:                568
      GID:                     568
    Mounts:
      /dev/shm from devshm (rw)
      /shared from shared (rw)
      /tmp from tmp (rw)
      /var/logs from varlogs (rw)
      /var/run from varrun (rw)
  Volumes:
   devshm:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
   shared:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
   tmp:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
   varlogs:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
   varrun:
    Type:                       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:                     Memory
    SizeLimit:                  8Gi
  Topology Spread Constraints:  kubernetes.io/hostname:ScheduleAnyway when max skew 1 is exceeded for selector app.kubernetes.io/instance=openspeedtest,app.kubernetes.io/name=openspeedtest,pod.name=openspeedtest
                                truecharts.org/rack:ScheduleAnyway when max skew 1 is exceeded for selector app.kubernetes.io/instance=openspeedtest,app.kubernetes.io/name=openspeedtest,pod.name=openspeedtest
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Progressing    True    NewReplicaSetAvailable
  Available      True    MinimumReplicasAvailable
OldReplicaSets:  <none>
NewReplicaSet:   openspeedtest-677989cc6f (1/1 replicas created)
Events:
  Type    Reason             Age   From                   Message
  ----    ------             ----  ----                   -------
  Normal  ScalingReplicaSet  34m   deployment-controller  Scaled down replica set openspeedtest-7cf887d7f to 0 from 1
  Normal  ScalingReplicaSet  32m   deployment-controller  Scaled up replica set openspeedtest-677989cc6f to 1
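
Note: the one-line probe summaries above unpack into the usual probe fields.
For example, the liveness line maps to roughly this container YAML (path,
port, and thresholds read off the output):

    livenessProbe:
      httpGet:
        path: /
        port: 3000
      initialDelaySeconds: 10
      timeoutSeconds: 5
      periodSeconds: 10
      successThreshold: 1
      failureThreshold: 5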


Name:               unifi
Namespace:          ix-unifi
CreationTimestamp:  Sat, 16 Dec 2023 17:01:47 -0500
Labels:             app=unifi-18.0.10
                    app.kubernetes.io/instance=unifi
                    app.kubernetes.io/managed-by=Helm
                    app.kubernetes.io/name=unifi
                    app.kubernetes.io/version=8.0.7
                    helm-revision=12
                    helm.sh/chart=unifi-18.0.10
                    release=unifi
Annotations:        deployment.kubernetes.io/revision: 11
                    meta.helm.sh/release-name: unifi
                    meta.helm.sh/release-namespace: ix-unifi
Selector:           app.kubernetes.io/instance=unifi,app.kubernetes.io/name=unifi,pod.name=main
Replicas:           1 desired | 1 updated | 1 total | 1 available | 0 unavailable
StrategyType:       Recreate
MinReadySeconds:    0
Pod Template:
  Labels:           app=unifi-18.0.10
                    app.kubernetes.io/instance=unifi
                    app.kubernetes.io/managed-by=Helm
                    app.kubernetes.io/name=unifi
                    app.kubernetes.io/version=8.0.7
                    helm-revision=12
                    helm.sh/chart=unifi-18.0.10
                    pod.name=main
                    release=unifi
  Annotations:      rollme: O4PII
  Service Account:  default
  Init Containers:
   unifi-init-migrate:
    Image:      tccr.io/truecharts/alpine:v3.18.4@sha256:51c4ca9a8213d3f4026bd560e8aa2de365d275d6cd0298eff2ae20671fef34e9
    Port:       <none>
    Host Port:  <none>
    Command:
      /bin/sh
    Args:
      -c
      newdatadir="/usr/lib/unifi/data"
      olddatadir="/usr/lib/unifi/olddata/data"
      # Check the dir exists
      [ ! -d "$newdatadir" ] && echo "$newdatadir missing" && exit 1
      # Check if there is a data/data dir to migrate
      [ ! -d "$olddatadir" ] && echo "No $olddatadir dir found. Migration skipped" && exit 0
      
      # Check if the new data dir is empty, ignoring the old data dir
      dirs=$(ls -A "$newdatadir" | grep -v "data")
      if [ -z "$dirs" ]; then
        echo "New data dir is empty. Migrating data one level up"
        cp -rf "$olddatadir"/* "$newdatadir" || { echo "Failed to move data"; exit 1; }
        echo "Data migration complete"
      fi
      
    Limits:
      cpu:     4
      memory:  8Gi
    Requests:
      cpu:     10m
      memory:  50Mi
    Environment:
      TZ:                      America/New_York
      UMASK:                   0022
      UMASK_SET:               0022
      NVIDIA_VISIBLE_DEVICES:  void
      S6_READ_ONLY_ROOT:       1
    Mounts:
      /dev/shm from devshm (rw)
      /shared from shared (rw)
      /tmp from tmp (rw)
      /usr/lib/unifi/data from data (rw)
      /usr/lib/unifi/olddata from config (rw)
      /var/logs from varlogs (rw)
      /var/run from varrun (rw)
  Containers:
   unifi:
    Image:       ghcr.io/goofball222/unifi:8.0.7@sha256:218565257fdb4b2c7f27b8e4fdb4925e9348fd7cfb3ed027974bebf78bdc45ba
    Ports:       8080/TCP, 8880/TCP, 8843/TCP, 8443/TCP, 6789/TCP, 3478/UDP
    Host Ports:  8080/TCP, 8880/TCP, 8843/TCP, 8443/TCP, 6789/TCP, 3478/UDP
    Limits:
      cpu:     4
      memory:  8Gi
    Requests:
      cpu:      10m
      memory:   50Mi
    Liveness:   http-get https://:8443/ delay=10s timeout=5s period=10s #success=1 #failure=5
    Readiness:  http-get https://:8443/ delay=10s timeout=5s period=10s #success=2 #failure=5
    Startup:    tcp-socket :8443 delay=10s timeout=2s period=5s #success=1 #failure=60
    Environment:
      TZ:                      America/New_York
      UMASK:                   0022
      UMASK_SET:               0022
      NVIDIA_VISIBLE_DEVICES:  void
      S6_READ_ONLY_ROOT:       1
      DB_MONGO_LOCAL:          true
      RUN_CHOWN:               true
    Mounts:
      /dev/shm from devshm (rw)
      /shared from shared (rw)
      /tmp from tmp (rw)
      /usr/lib/unifi/cert from certs (rw)
      /usr/lib/unifi/data from data (rw)
      /usr/lib/unifi/logs from logs (rw)
      /usr/lib/unifi/olddata from config (rw)
      /var/logs from varlogs (rw)
      /var/run from varrun (rw)
  Volumes:
   certs:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  unifi-certs
    ReadOnly:   false
   config:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  unifi-config
    ReadOnly:   false
   data:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  unifi-data
    ReadOnly:   false
   devshm:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
   logs:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
   shared:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
   tmp:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
   varlogs:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
   varrun:
    Type:                       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:                     Memory
    SizeLimit:                  8Gi
  Topology Spread Constraints:  kubernetes.io/hostname:ScheduleAnyway when max skew 1 is exceeded for selector app.kubernetes.io/instance=unifi,app.kubernetes.io/name=unifi,pod.name=unifi
                                truecharts.org/rack:ScheduleAnyway when max skew 1 is exceeded for selector app.kubernetes.io/instance=unifi,app.kubernetes.io/name=unifi,pod.name=unifi
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Progressing    True    NewReplicaSetAvailable
  Available      True    MinimumReplicasAvailable
OldReplicaSets:  <none>
NewReplicaSet:   unifi-749498766b (1/1 replicas created)
Events:
  Type    Reason             Age   From                   Message
  ----    ------             ----  ----                   -------
  Normal  ScalingReplicaSet  34m   deployment-controller  Scaled down replica set unifi-6679cfcb8c to 0 from 1
  Normal  ScalingReplicaSet  33m   deployment-controller  Scaled up replica set unifi-749498766b to 1
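
Note: whether the unifi-init-migrate script migrated, skipped, or failed is
only visible in the init container's log, e.g.:

    kubectl -n ix-unifi logs deploy/unifi -c unifi-init-migrate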


Name:               makemkv
Namespace:          ix-makemkv
CreationTimestamp:  Sun, 08 Oct 2023 13:45:17 -0400
Labels:             app=makemkv-7.0.10
                    app.kubernetes.io/instance=makemkv
                    app.kubernetes.io/managed-by=Helm
                    app.kubernetes.io/name=makemkv
                    app.kubernetes.io/version=23.11.2
                    helm-revision=20
                    helm.sh/chart=makemkv-7.0.10
                    release=makemkv
Annotations:        deployment.kubernetes.io/revision: 20
                    meta.helm.sh/release-name: makemkv
                    meta.helm.sh/release-namespace: ix-makemkv
Selector:           app.kubernetes.io/instance=makemkv,app.kubernetes.io/name=makemkv,pod.name=main
Replicas:           1 desired | 1 updated | 1 total | 1 available | 0 unavailable
StrategyType:       Recreate
MinReadySeconds:    0
Pod Template:
  Labels:           app=makemkv-7.0.10
                    app.kubernetes.io/instance=makemkv
                    app.kubernetes.io/managed-by=Helm
                    app.kubernetes.io/name=makemkv
                    app.kubernetes.io/version=23.11.2
                    helm-revision=20
                    helm.sh/chart=makemkv-7.0.10
                    pod.name=main
                    release=makemkv
  Annotations:      rollme: 3n1Fm
  Service Account:  default
  Containers:
   makemkv:
    Image:       jlesage/makemkv:v23.11.2@sha256:777c9cba43c8288e9e3d4ea7347ad427938b18bcf84b3b7b1bc69e7d9bbb5762
    Ports:       5800/TCP, 5900/TCP
    Host Ports:  0/TCP, 0/TCP
    Limits:
      cpu:     4
      memory:  8Gi
    Requests:
      cpu:      10m
      memory:   50Mi
    Liveness:   tcp-socket :5800 delay=10s timeout=5s period=10s #success=1 #failure=5
    Readiness:  tcp-socket :5800 delay=10s timeout=5s period=10s #success=2 #failure=5
    Startup:    tcp-socket :5800 delay=10s timeout=2s period=5s #success=1 #failure=60
    Environment Variables from:
      makemkv-makemkv  ConfigMap  Optional: false
    Environment:
      TZ:                      America/New_York
      UMASK:                   0022
      UMASK_SET:               0022
      NVIDIA_VISIBLE_DEVICES:  void
      PUID:                    568
      USER_ID:                 568
      UID:                     568
      PGID:                    568
      GROUP_ID:                568
      GID:                     568
      VNC_PASSWORD:            
    Mounts:
      /config from config (rw)
      /dev/shm from devshm (rw)
      /dev/sr0 from device-0 (rw)
      /mnt/rips from persist-list-0 (rw)
      /output from output (rw)
      /shared from shared (rw)
      /storage from storage (rw)
      /tmp from tmp (rw)
      /var/logs from varlogs (rw)
      /var/run from varrun (rw)
  Volumes:
   config:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  makemkv-config
    ReadOnly:   false
   device-0:
    Type:          HostPath (bare host directory volume)
    Path:          /dev/sr0
    HostPathType:  
   devshm:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
   output:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  makemkv-output
    ReadOnly:   false
   persist-list-0:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/Plex/rips
    HostPathType:  
   shared:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
   storage:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  makemkv-storage
    ReadOnly:   false
   tmp:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
   varlogs:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
   varrun:
    Type:                       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:                     Memory
    SizeLimit:                  8Gi
  Topology Spread Constraints:  kubernetes.io/hostname:ScheduleAnyway when max skew 1 is exceeded for selector app.kubernetes.io/instance=makemkv,app.kubernetes.io/name=makemkv,pod.name=makemkv
                                truecharts.org/rack:ScheduleAnyway when max skew 1 is exceeded for selector app.kubernetes.io/instance=makemkv,app.kubernetes.io/name=makemkv,pod.name=makemkv
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Progressing    True    NewReplicaSetAvailable
  Available      True    MinimumReplicasAvailable
OldReplicaSets:  <none>
NewReplicaSet:   makemkv-859dc585d8 (1/1 replicas created)
Events:
  Type    Reason             Age   From                   Message
  ----    ------             ----  ----                   -------
  Normal  ScalingReplicaSet  34m   deployment-controller  Scaled down replica set makemkv-7c5789dd5d to 0 from 1
  Normal  ScalingReplicaSet  34m   deployment-controller  Scaled up replica set makemkv-859dc585d8 to 1
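
Note: "Environment Variables from: makemkv-makemkv ConfigMap Optional: false"
is the describe rendering of an envFrom block; in container YAML it is
roughly:

    envFrom:
      - configMapRef:
          name: makemkv-makemkv
          optional: false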


Name:               tailscale
Namespace:          ix-tailscale
CreationTimestamp:  Sun, 08 Oct 2023 14:04:18 -0400
Labels:             app=tailscale-6.0.10
                    app.kubernetes.io/instance=tailscale
                    app.kubernetes.io/managed-by=Helm
                    app.kubernetes.io/name=tailscale
                    app.kubernetes.io/version=1.56.0
                    helm-revision=25
                    helm.sh/chart=tailscale-6.0.10
                    release=tailscale
Annotations:        deployment.kubernetes.io/revision: 22
                    meta.helm.sh/release-name: tailscale
                    meta.helm.sh/release-namespace: ix-tailscale
Selector:           app.kubernetes.io/instance=tailscale,app.kubernetes.io/name=tailscale,pod.name=main
Replicas:           1 desired | 1 updated | 1 total | 1 available | 0 unavailable
StrategyType:       Recreate
MinReadySeconds:    0
Pod Template:
  Labels:           app=tailscale-6.0.10
                    app.kubernetes.io/instance=tailscale
                    app.kubernetes.io/managed-by=Helm
                    app.kubernetes.io/name=tailscale
                    app.kubernetes.io/version=1.56.0
                    helm-revision=25
                    helm.sh/chart=tailscale-6.0.10
                    pod.name=main
                    release=tailscale
  Annotations:      rollme: W08RC
  Service Account:  tailscale
  Containers:
   tailscale:
    Image:      tailscale/tailscale:v1.56.0@sha256:ed1f9317d0bab2bc17f6eecc29401479b91c938df48c28b1bd3d3014eba9d013
    Port:       <none>
    Host Port:  <none>
    Command:
      /usr/local/bin/containerboot
    Limits:
      cpu:     4
      memory:  8Gi
    Requests:
      cpu:     10m
      memory:  50Mi
    Environment Variables from:
      tailscale-tailscale-config  ConfigMap  Optional: false
    Environment:
      TZ:                      America/New_York
      UMASK:                   0022
      UMASK_SET:               0022
      NVIDIA_VISIBLE_DEVICES:  void
      PUID:                    568
      USER_ID:                 568
      UID:                     568
      PGID:                    568
      GROUP_ID:                568
      GID:                     568
    Mounts:
      /dev/net/tun from tun (rw)
      /dev/shm from devshm (rw)
      /shared from shared (rw)
      /tmp from tmp (rw)
      /var/logs from varlogs (rw)
      /var/run from varrun (rw)
  Volumes:
   devshm:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
   shared:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
   tmp:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
   tun:
    Type:          HostPath (bare host directory volume)
    Path:          /dev/net/tun
    HostPathType:  
   varlogs:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
   varrun:
    Type:                       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:                     Memory
    SizeLimit:                  8Gi
  Topology Spread Constraints:  kubernetes.io/hostname:ScheduleAnyway when max skew 1 is exceeded for selector app.kubernetes.io/instance=tailscale,app.kubernetes.io/name=tailscale,pod.name=tailscale
                                truecharts.org/rack:ScheduleAnyway when max skew 1 is exceeded for selector app.kubernetes.io/instance=tailscale,app.kubernetes.io/name=tailscale,pod.name=tailscale
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Available      True    MinimumReplicasAvailable
  Progressing    True    NewReplicaSetAvailable
OldReplicaSets:  <none>
NewReplicaSet:   tailscale-6d686c8d6b (1/1 replicas created)
Events:
  Type    Reason             Age   From                   Message
  ----    ------             ----  ----                   -------
  Normal  ScalingReplicaSet  35m   deployment-controller  Scaled down replica set tailscale-54bcbc655f to 0 from 1
  Normal  ScalingReplicaSet  35m   deployment-controller  Scaled up replica set tailscale-56bbbb557b to 1
  Normal  ScalingReplicaSet  27m   deployment-controller  Scaled down replica set tailscale-56bbbb557b to 0 from 1
  Normal  ScalingReplicaSet  27m   deployment-controller  Scaled up replica set tailscale-54bcbc655f to 1 from 0
  Normal  ScalingReplicaSet  17m   deployment-controller  Scaled down replica set tailscale-54bcbc655f to 0 from 1
  Normal  ScalingReplicaSet  17m   deployment-controller  Scaled up replica set tailscale-6d686c8d6b to 1
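
Note: the event churn above is expected with this chart: the pod-template
annotation "rollme" carries a random value, presumably regenerated on each
helm upgrade, which changes the pod-template hash and forces a new ReplicaSet
even when nothing else changed. The revision trail can be listed with:

    kubectl -n ix-tailscale rollout history deployment/tailscale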


Name:               plex
Namespace:          ix-plex
CreationTimestamp:  Sun, 24 Dec 2023 02:01:24 -0500
Labels:             app.kubernetes.io/instance=plex
                    app.kubernetes.io/managed-by=Helm
                    app.kubernetes.io/name=plex
Annotations:        deployment.kubernetes.io/revision: 13
                    meta.helm.sh/release-name: plex
                    meta.helm.sh/release-namespace: ix-plex
Selector:           app.kubernetes.io/instance=plex,app.kubernetes.io/name=plex
Replicas:           1 desired | 1 updated | 1 total | 1 available | 0 unavailable
StrategyType:       Recreate
MinReadySeconds:    0
Pod Template:
  Labels:       app.kubernetes.io/instance=plex
                app.kubernetes.io/name=plex
  Annotations:  rollme: cDV09
  Containers:
   plex:
    Image:       plexinc/pms-docker:plexpass
    Ports:       32400/TCP, 32469/TCP, 1900/UDP, 32410/UDP, 32412/UDP, 32413/UDP, 32414/UDP
    Host Ports:  32400/TCP, 32469/TCP, 1900/UDP, 32410/UDP, 32412/UDP, 32413/UDP, 32414/UDP
    Limits:
      amd.com/gpu:         0
      gpu.intel.com/i915:  0
      nvidia.com/gpu:      1
    Requests:
      cpu:      10m
      memory:   50Mi
    Liveness:   tcp-socket :32400 delay=10s timeout=5s period=10s #success=1 #failure=5
    Readiness:  tcp-socket :32400 delay=10s timeout=5s period=10s #success=2 #failure=5
    Startup:    tcp-socket :32400 delay=10s timeout=2s period=5s #success=1 #failure=60
    Environment:
      KUBE_NAMESPACE:               (v1:metadata.namespace)
      POD_IP:                       (v1:status.podIP)
      NVIDIA_DRIVER_CAPABILITIES:  all
      TZ:                          America/New_York
      PLEX_CLAIM:                  claim-xqNjvLPfxzjEUg1J2AzW
      PMS_INTERNAL_ADDRESS:        http://plex:32400
      PMS_IMAGE:                   plexinc/pms-docker:1.32.8.7639-fb6452ebf
    Mounts:
      /config from config (rw)
      /config/Library/Application Support/Plex Media Server/Logs from shared-logs (rw)
      /data from data (rw)
      /shared from shared (rw)
      /transcode from transcode (rw)
      Movies from extrappvolume-0 (rw)
  Volumes:
   config:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/ix-applications/releases/plex/volumes/ix_volumes/ix-plex_config
    HostPathType:  
   data:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/ix-applications/releases/plex/volumes/ix_volumes/ix-plex_data
    HostPathType:  
   shared:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
   shared-logs:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
   transcode:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/ix-applications/releases/plex/volumes/ix_volumes/ix-plex_transcode
    HostPathType:  
   extrappvolume-0:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Rusty/Spinners/Plex
    HostPathType:  
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Available      True    MinimumReplicasAvailable
  Progressing    True    NewReplicaSetAvailable
OldReplicaSets:  <none>
NewReplicaSet:   plex-68fb4bd7b4 (1/1 replicas created)
Events:
  Type    Reason             Age   From                   Message
  ----    ------             ----  ----                   -------
  Normal  ScalingReplicaSet  31m   deployment-controller  Scaled up replica set plex-846c59bbff to 1
  Normal  ScalingReplicaSet  30m   deployment-controller  Scaled down replica set plex-846c59bbff to 0 from 1
  Normal  ScalingReplicaSet  30m   deployment-controller  Scaled up replica set plex-c566c4b68 to 1
  Normal  ScalingReplicaSet  18m   deployment-controller  Scaled down replica set plex-c566c4b68 to 0 from 1
  Normal  ScalingReplicaSet  17m   deployment-controller  Scaled up replica set plex-5b5b8c5d48 to 1
  Normal  ScalingReplicaSet  15m   deployment-controller  Scaled down replica set plex-5b5b8c5d48 to 0 from 1
  Normal  ScalingReplicaSet  14m   deployment-controller  Scaled up replica set plex-7d74cbd9b7 to 1
  Normal  ScalingReplicaSet  14m   deployment-controller  Scaled down replica set plex-7d74cbd9b7 to 0 from 1
  Normal  ScalingReplicaSet  14m   deployment-controller  Scaled up replica set plex-74d6f7c5ff to 1
  Normal  ScalingReplicaSet  14m   deployment-controller  Scaled down replica set plex-74d6f7c5ff to 0 from 1
  Normal  ScalingReplicaSet  13m   deployment-controller  Scaled up replica set plex-68fb4bd7b4 to 1
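
Note: nvidia.com/gpu is an extended resource, so it is requested under limits
(when only a limit is given, the request defaults to the same value), and the
zeroed amd.com/gpu and gpu.intel.com/i915 entries explicitly claim no GPUs of
those types. In container YAML:

    resources:
      limits:
        nvidia.com/gpu: 1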
