
------------------------------
1) Describe 'Pods' resources
------------------------------

Name:                 csi-nfs-node-8j9mw
Namespace:            kube-system
Priority:             2000001000
Priority Class Name:  system-node-critical
Service Account:      csi-nfs-node-sa
Node:                 ix-truenas/192.168.1.3
Start Time:           Sun, 24 Dec 2023 12:07:45 -0500
Labels:               app=csi-nfs-node
                      controller-revision-hash=598688b946
                      pod-template-generation=1
Annotations:          <none>
Status:               Running
IP:                   192.168.1.3
IPs:
  IP:           192.168.1.3
Controlled By:  DaemonSet/csi-nfs-node
Containers:
  liveness-probe:
    Container ID:  containerd://709e9f82986c0113c8022f0364d0f5210539c35ecb943bf14a5ad58ed96d551b
    Image:         registry.k8s.io/sig-storage/livenessprobe:v2.10.0
    Image ID:      registry.k8s.io/sig-storage/livenessprobe@sha256:4dc0b87ccd69f9865b89234d8555d3a614ab0a16ed94a3016ffd27f8106132ce
    Port:          <none>
    Host Port:     <none>
    Args:
      --csi-address=/csi/csi.sock
      --probe-timeout=3s
      --health-port=29653
      --v=2
    State:          Running
      Started:      Sun, 24 Dec 2023 12:07:47 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      memory:  100Mi
    Requests:
      cpu:        10m
      memory:     20Mi
    Environment:  <none>
    Mounts:
      /csi from socket-dir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-ndbpk (ro)
  node-driver-registrar:
    Container ID:  containerd://ede94daea2ef9400c26756fc2e59d8ebbd32b65cfa4374ce51a75223ff78da5e
    Image:         registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0
    Image ID:      registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:f6717ce72a2615c7fbc746b4068f788e78579c54c43b8716e5ce650d97af2df1
    Port:          <none>
    Host Port:     <none>
    Args:
      --v=2
      --csi-address=/csi/csi.sock
      --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
    State:          Running
      Started:      Sun, 24 Dec 2023 12:07:47 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      memory:  100Mi
    Requests:
      cpu:     10m
      memory:  20Mi
    Liveness:  exec [/csi-node-driver-registrar --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) --mode=kubelet-registration-probe] delay=30s timeout=15s period=10s #success=1 #failure=3
    Environment:
      DRIVER_REG_SOCK_PATH:  /var/lib/kubelet/plugins/csi-nfsplugin/csi.sock
      KUBE_NODE_NAME:         (v1:spec.nodeName)
    Mounts:
      /csi from socket-dir (rw)
      /registration from registration-dir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-ndbpk (ro)
  nfs:
    Container ID:  containerd://1c9ec29d9273ecafca0e0be44a0025f6764dbbbc41d8c8eb0f03addde6f1aa7c
    Image:         registry.k8s.io/sig-storage/nfsplugin:v4.4.0
    Image ID:      registry.k8s.io/sig-storage/nfsplugin@sha256:971cb526bc0108cca7d8efda32102629cb362f99bbd4879fd381bea1b2014ab4
    Port:          29653/TCP
    Host Port:     29653/TCP
    Args:
      -v=5
      --nodeid=$(NODE_ID)
      --endpoint=$(CSI_ENDPOINT)
    State:          Running
      Started:      Sun, 24 Dec 2023 12:07:48 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      memory:  300Mi
    Requests:
      cpu:     10m
      memory:  20Mi
    Liveness:  http-get http://:healthz/healthz delay=30s timeout=10s period=30s #success=1 #failure=5
    Environment:
      NODE_ID:        (v1:spec.nodeName)
      CSI_ENDPOINT:  unix:///csi/csi.sock
    Mounts:
      /csi from socket-dir (rw)
      /var/lib/kubelet/pods from pods-mount-dir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-ndbpk (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  socket-dir:
    Type:          HostPath (bare host directory volume)
    Path:          /var/lib/kubelet/plugins/csi-nfsplugin
    HostPathType:  DirectoryOrCreate
  pods-mount-dir:
    Type:          HostPath (bare host directory volume)
    Path:          /var/lib/kubelet/pods
    HostPathType:  Directory
  registration-dir:
    Type:          HostPath (bare host directory volume)
    Path:          /var/lib/kubelet/plugins_registry
    HostPathType:  Directory
  kube-api-access-ndbpk:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   Burstable
Node-Selectors:              kubernetes.io/os=linux
Tolerations:                 op=Exists
                             node.kubernetes.io/disk-pressure:NoSchedule op=Exists
                             node.kubernetes.io/memory-pressure:NoSchedule op=Exists
                             node.kubernetes.io/network-unavailable:NoSchedule op=Exists
                             node.kubernetes.io/not-ready:NoExecute op=Exists
                             node.kubernetes.io/pid-pressure:NoSchedule op=Exists
                             node.kubernetes.io/unreachable:NoExecute op=Exists
                             node.kubernetes.io/unschedulable:NoSchedule op=Exists
Events:
  Type     Reason        Age   From               Message
  ----     ------        ----  ----               -------
  Normal   Scheduled     24m   default-scheduler  Successfully assigned kube-system/csi-nfs-node-8j9mw to ix-truenas
  Warning  NodeShutdown  24m   kubelet            Pod was rejected as the node is shutting down.
  Normal   Pulled        23m   kubelet            Container image "registry.k8s.io/sig-storage/livenessprobe:v2.10.0" already present on machine
  Normal   Created       23m   kubelet            Created container liveness-probe
  Normal   Started       23m   kubelet            Started container liveness-probe
  Normal   Pulled        23m   kubelet            Container image "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0" already present on machine
  Normal   Created       23m   kubelet            Created container node-driver-registrar
  Normal   Started       23m   kubelet            Started container node-driver-registrar
  Normal   Pulled        23m   kubelet            Container image "registry.k8s.io/sig-storage/nfsplugin:v4.4.0" already present on machine
  Normal   Created       23m   kubelet            Created container nfs
  Normal   Started       23m   kubelet            Started container nfs


Name:                 csi-smb-node-r7glx
Namespace:            kube-system
Priority:             2000001000
Priority Class Name:  system-node-critical
Service Account:      csi-smb-node-sa
Node:                 ix-truenas/192.168.1.3
Start Time:           Sun, 24 Dec 2023 12:07:45 -0500
Labels:               app=csi-smb-node
                      controller-revision-hash=754c5545b6
                      pod-template-generation=1
Annotations:          <none>
Status:               Running
IP:                   192.168.1.3
IPs:
  IP:           192.168.1.3
Controlled By:  DaemonSet/csi-smb-node
Containers:
  liveness-probe:
    Container ID:  containerd://6596158a49b37ab48b1de29892dc9240dbf3f8be44574204788c425e0283267f
    Image:         registry.k8s.io/sig-storage/livenessprobe:v2.10.0
    Image ID:      registry.k8s.io/sig-storage/livenessprobe@sha256:4dc0b87ccd69f9865b89234d8555d3a614ab0a16ed94a3016ffd27f8106132ce
    Port:          <none>
    Host Port:     <none>
    Args:
      --csi-address=/csi/csi.sock
      --probe-timeout=3s
      --health-port=29643
      --v=2
    State:          Running
      Started:      Sun, 24 Dec 2023 12:07:49 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      memory:  100Mi
    Requests:
      cpu:        10m
      memory:     20Mi
    Environment:  <none>
    Mounts:
      /csi from socket-dir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-jdqvm (ro)
  node-driver-registrar:
    Container ID:  containerd://1a6b5d5fe49612238b2f583a0e5c56fc2dca0b8cf518566913feea715b2c8ec4
    Image:         registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0
    Image ID:      registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:f6717ce72a2615c7fbc746b4068f788e78579c54c43b8716e5ce650d97af2df1
    Port:          <none>
    Host Port:     <none>
    Args:
      --csi-address=$(ADDRESS)
      --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
      --v=2
    State:          Running
      Started:      Sun, 24 Dec 2023 12:07:49 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      memory:  100Mi
    Requests:
      cpu:     10m
      memory:  20Mi
    Liveness:  exec [/csi-node-driver-registrar --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) --mode=kubelet-registration-probe] delay=30s timeout=15s period=10s #success=1 #failure=3
    Environment:
      ADDRESS:               /csi/csi.sock
      DRIVER_REG_SOCK_PATH:  /var/lib/kubelet/plugins/smb.csi.k8s.io/csi.sock
    Mounts:
      /csi from socket-dir (rw)
      /registration from registration-dir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-jdqvm (ro)
  smb:
    Container ID:  containerd://3f36f2d363438668a07ef5c41ebe089642a4e15c982255831b05960668159dd8
    Image:         registry.k8s.io/sig-storage/smbplugin:v1.11.0
    Image ID:      registry.k8s.io/sig-storage/smbplugin@sha256:65f664c6e9cce565805b13fb82b40018d56e39f41616134f572c52f3964c6ee4
    Port:          29643/TCP
    Host Port:     29643/TCP
    Args:
      --v=5
      --endpoint=$(CSI_ENDPOINT)
      --nodeid=$(KUBE_NODE_NAME)
    State:          Running
      Started:      Sun, 24 Dec 2023 12:07:50 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      memory:  200Mi
    Requests:
      cpu:     10m
      memory:  20Mi
    Liveness:  http-get http://:healthz/healthz delay=30s timeout=10s period=30s #success=1 #failure=5
    Environment:
      CSI_ENDPOINT:    unix:///csi/csi.sock
      KUBE_NODE_NAME:   (v1:spec.nodeName)
    Mounts:
      /csi from socket-dir (rw)
      /var/lib/kubelet/ from mountpoint-dir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-jdqvm (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  socket-dir:
    Type:          HostPath (bare host directory volume)
    Path:          /var/lib/kubelet/plugins/smb.csi.k8s.io
    HostPathType:  DirectoryOrCreate
  mountpoint-dir:
    Type:          HostPath (bare host directory volume)
    Path:          /var/lib/kubelet/
    HostPathType:  DirectoryOrCreate
  registration-dir:
    Type:          HostPath (bare host directory volume)
    Path:          /var/lib/kubelet/plugins_registry/
    HostPathType:  DirectoryOrCreate
  kube-api-access-jdqvm:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   Burstable
Node-Selectors:              kubernetes.io/os=linux
Tolerations:                 op=Exists
                             node.kubernetes.io/disk-pressure:NoSchedule op=Exists
                             node.kubernetes.io/memory-pressure:NoSchedule op=Exists
                             node.kubernetes.io/network-unavailable:NoSchedule op=Exists
                             node.kubernetes.io/not-ready:NoExecute op=Exists
                             node.kubernetes.io/pid-pressure:NoSchedule op=Exists
                             node.kubernetes.io/unreachable:NoExecute op=Exists
                             node.kubernetes.io/unschedulable:NoSchedule op=Exists
Events:
  Type     Reason        Age   From               Message
  ----     ------        ----  ----               -------
  Normal   Scheduled     24m   default-scheduler  Successfully assigned kube-system/csi-smb-node-r7glx to ix-truenas
  Warning  NodeShutdown  24m   kubelet            Pod was rejected as the node is shutting down.
  Normal   Pulled        23m   kubelet            Container image "registry.k8s.io/sig-storage/livenessprobe:v2.10.0" already present on machine
  Normal   Created       23m   kubelet            Created container liveness-probe
  Normal   Started       23m   kubelet            Started container liveness-probe
  Normal   Pulled        23m   kubelet            Container image "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0" already present on machine
  Normal   Created       23m   kubelet            Created container node-driver-registrar
  Normal   Started       23m   kubelet            Started container node-driver-registrar
  Normal   Pulled        23m   kubelet            Container image "registry.k8s.io/sig-storage/smbplugin:v1.11.0" already present on machine
  Normal   Created       23m   kubelet            Created container smb
  Normal   Started       23m   kubelet            Started container smb


Name:             svclb-makemkv-84687e74-8kljd
Namespace:        kube-system
Priority:         0
Service Account:  svclb
Node:             ix-truenas/192.168.1.3
Start Time:       Sun, 24 Dec 2023 12:07:53 -0500
Labels:           app=svclb-makemkv-84687e74
                  controller-revision-hash=5fb6f544cf
                  pod-template-generation=1
                  svccontroller.k3s.cattle.io/svcname=makemkv
                  svccontroller.k3s.cattle.io/svcnamespace=ix-makemkv
Annotations:      k8s.v1.cni.cncf.io/network-status:
                    [{
                        "name": "ix-net",
                        "interface": "eth0",
                        "ips": [
                            "172.16.0.234"
                        ],
                        "mac": "02:f7:0b:12:7b:41",
                        "default": true,
                        "dns": {},
                        "gateway": [
                            "172.16.0.1"
                        ]
                    }]
Status:           Running
IP:               172.16.0.234
IPs:
  IP:           172.16.0.234
Controlled By:  DaemonSet/svclb-makemkv-84687e74
Containers:
  lb-tcp-10180:
    Container ID:   containerd://b92d61cd4b4c1feddbb1ffe015f62f62091e7a79ccbb25cf012d61d7547de15a
    Image:          rancher/klipper-lb:v0.4.4
    Image ID:       docker.io/rancher/klipper-lb@sha256:d6780e97ac25454b56f88410b236d52572518040f11d0db5c6baaac0d2fcf860
    Port:           10180/TCP
    Host Port:      10180/TCP
    State:          Running
      Started:      Sun, 24 Dec 2023 12:07:54 -0500
    Ready:          True
    Restart Count:  0
    Environment:
      SRC_PORT:    10180
      SRC_RANGES:  0.0.0.0/0
      DEST_PROTO:  TCP
      DEST_PORT:   10180
      DEST_IPS:    172.17.166.94
    Mounts:        <none>
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:            <none>
QoS Class:          BestEffort
Node-Selectors:     <none>
Tolerations:        CriticalAddonsOnly op=Exists
                    node-role.kubernetes.io/control-plane:NoSchedule op=Exists
                    node-role.kubernetes.io/master:NoSchedule op=Exists
                    node.kubernetes.io/disk-pressure:NoSchedule op=Exists
                    node.kubernetes.io/memory-pressure:NoSchedule op=Exists
                    node.kubernetes.io/not-ready:NoExecute op=Exists
                    node.kubernetes.io/pid-pressure:NoSchedule op=Exists
                    node.kubernetes.io/unreachable:NoExecute op=Exists
                    node.kubernetes.io/unschedulable:NoSchedule op=Exists
Events:
  Type    Reason          Age   From               Message
  ----    ------          ----  ----               -------
  Normal  Scheduled       23m   default-scheduler  Successfully assigned kube-system/svclb-makemkv-84687e74-8kljd to ix-truenas
  Normal  AddedInterface  23m   multus             Add eth0 [172.16.0.234/16] from ix-net
  Normal  Pulled          23m   kubelet            Container image "rancher/klipper-lb:v0.4.4" already present on machine
  Normal  Created         23m   kubelet            Created container lb-tcp-10180
  Normal  Started         23m   kubelet            Started container lb-tcp-10180


Name:             svclb-makemkv-vnc-ad0786b1-l48xl
Namespace:        kube-system
Priority:         0
Service Account:  svclb
Node:             ix-truenas/192.168.1.3
Start Time:       Sun, 24 Dec 2023 12:07:53 -0500
Labels:           app=svclb-makemkv-vnc-ad0786b1
                  controller-revision-hash=97f745875
                  pod-template-generation=1
                  svccontroller.k3s.cattle.io/svcname=makemkv-vnc
                  svccontroller.k3s.cattle.io/svcnamespace=ix-makemkv
Annotations:      k8s.v1.cni.cncf.io/network-status:
                    [{
                        "name": "ix-net",
                        "interface": "eth0",
                        "ips": [
                            "172.16.0.233"
                        ],
                        "mac": "c2:0a:d0:92:d5:45",
                        "default": true,
                        "dns": {},
                        "gateway": [
                            "172.16.0.1"
                        ]
                    }]
Status:           Running
IP:               172.16.0.233
IPs:
  IP:           172.16.0.233
Controlled By:  DaemonSet/svclb-makemkv-vnc-ad0786b1
Containers:
  lb-tcp-10181:
    Container ID:   containerd://c379a7b7148bafce0389d2a036960d730524a4b1f04df7356e00479ecf2610b1
    Image:          rancher/klipper-lb:v0.4.4
    Image ID:       docker.io/rancher/klipper-lb@sha256:d6780e97ac25454b56f88410b236d52572518040f11d0db5c6baaac0d2fcf860
    Port:           10181/TCP
    Host Port:      10181/TCP
    State:          Running
      Started:      Sun, 24 Dec 2023 12:07:54 -0500
    Ready:          True
    Restart Count:  0
    Environment:
      SRC_PORT:    10181
      SRC_RANGES:  0.0.0.0/0
      DEST_PROTO:  TCP
      DEST_PORT:   10181
      DEST_IPS:    172.17.99.26
    Mounts:        <none>
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:            <none>
QoS Class:          BestEffort
Node-Selectors:     <none>
Tolerations:        CriticalAddonsOnly op=Exists
                    node-role.kubernetes.io/control-plane:NoSchedule op=Exists
                    node-role.kubernetes.io/master:NoSchedule op=Exists
                    node.kubernetes.io/disk-pressure:NoSchedule op=Exists
                    node.kubernetes.io/memory-pressure:NoSchedule op=Exists
                    node.kubernetes.io/not-ready:NoExecute op=Exists
                    node.kubernetes.io/pid-pressure:NoSchedule op=Exists
                    node.kubernetes.io/unreachable:NoExecute op=Exists
                    node.kubernetes.io/unschedulable:NoSchedule op=Exists
Events:
  Type    Reason          Age   From               Message
  ----    ------          ----  ----               -------
  Normal  Scheduled       23m   default-scheduler  Successfully assigned kube-system/svclb-makemkv-vnc-ad0786b1-l48xl to ix-truenas
  Normal  AddedInterface  23m   multus             Add eth0 [172.16.0.233/16] from ix-net
  Normal  Pulled          23m   kubelet            Container image "rancher/klipper-lb:v0.4.4" already present on machine
  Normal  Created         23m   kubelet            Created container lb-tcp-10181
  Normal  Started         23m   kubelet            Started container lb-tcp-10181


Name:             svclb-openspeedtest-5fe4cae5-hxgdq
Namespace:        kube-system
Priority:         0
Service Account:  svclb
Node:             ix-truenas/192.168.1.3
Start Time:       Sun, 24 Dec 2023 12:07:53 -0500
Labels:           app=svclb-openspeedtest-5fe4cae5
                  controller-revision-hash=7ccddd4b4f
                  pod-template-generation=1
                  svccontroller.k3s.cattle.io/svcname=openspeedtest
                  svccontroller.k3s.cattle.io/svcnamespace=ix-openspeedtest
Annotations:      k8s.v1.cni.cncf.io/network-status:
                    [{
                        "name": "ix-net",
                        "interface": "eth0",
                        "ips": [
                            "172.16.0.232"
                        ],
                        "mac": "62:d2:8a:3d:0e:e5",
                        "default": true,
                        "dns": {},
                        "gateway": [
                            "172.16.0.1"
                        ]
                    }]
Status:           Running
IP:               172.16.0.232
IPs:
  IP:           172.16.0.232
Controlled By:  DaemonSet/svclb-openspeedtest-5fe4cae5
Containers:
  lb-tcp-10256:
    Container ID:   containerd://b9b84e39c769509b1b8277b156728f6d22f8d63b9854d691249216743b745d6a
    Image:          rancher/klipper-lb:v0.4.4
    Image ID:       docker.io/rancher/klipper-lb@sha256:d6780e97ac25454b56f88410b236d52572518040f11d0db5c6baaac0d2fcf860
    Port:           10256/TCP
    Host Port:      10256/TCP
    State:          Running
      Started:      Sun, 24 Dec 2023 12:07:54 -0500
    Ready:          True
    Restart Count:  0
    Environment:
      SRC_PORT:    10256
      SRC_RANGES:  0.0.0.0/0
      DEST_PROTO:  TCP
      DEST_PORT:   10256
      DEST_IPS:    172.17.139.9
    Mounts:        <none>
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:            <none>
QoS Class:          BestEffort
Node-Selectors:     <none>
Tolerations:        CriticalAddonsOnly op=Exists
                    node-role.kubernetes.io/control-plane:NoSchedule op=Exists
                    node-role.kubernetes.io/master:NoSchedule op=Exists
                    node.kubernetes.io/disk-pressure:NoSchedule op=Exists
                    node.kubernetes.io/memory-pressure:NoSchedule op=Exists
                    node.kubernetes.io/not-ready:NoExecute op=Exists
                    node.kubernetes.io/pid-pressure:NoSchedule op=Exists
                    node.kubernetes.io/unreachable:NoExecute op=Exists
                    node.kubernetes.io/unschedulable:NoSchedule op=Exists
Events:
  Type    Reason          Age   From               Message
  ----    ------          ----  ----               -------
  Normal  Scheduled       23m   default-scheduler  Successfully assigned kube-system/svclb-openspeedtest-5fe4cae5-hxgdq to ix-truenas
  Normal  AddedInterface  23m   multus             Add eth0 [172.16.0.232/16] from ix-net
  Normal  Pulled          23m   kubelet            Container image "rancher/klipper-lb:v0.4.4" already present on machine
  Normal  Created         23m   kubelet            Created container lb-tcp-10256
  Normal  Started         23m   kubelet            Started container lb-tcp-10256


Name:                 coredns-59b4f5bbd5-pcw68
Namespace:            kube-system
Priority:             2000000000
Priority Class Name:  system-cluster-critical
Service Account:      coredns
Node:                 ix-truenas/192.168.1.3
Start Time:           Sun, 24 Dec 2023 12:08:02 -0500
Labels:               k8s-app=kube-dns
                      pod-template-hash=59b4f5bbd5
Annotations:          k8s.v1.cni.cncf.io/network-status:
                        [{
                            "name": "ix-net",
                            "interface": "eth0",
                            "ips": [
                                "172.16.0.240"
                            ],
                            "mac": "32:86:63:89:86:27",
                            "default": true,
                            "dns": {},
                            "gateway": [
                                "172.16.0.1"
                            ]
                        }]
Status:               Running
IP:                   172.16.0.240
IPs:
  IP:           172.16.0.240
Controlled By:  ReplicaSet/coredns-59b4f5bbd5
Containers:
  coredns:
    Container ID:  containerd://cdcb4a65e51a239a01b4f72f0c2c66461c0f7a7aeeb780412f1661fc3500c269
    Image:         rancher/mirrored-coredns-coredns:1.10.1
    Image ID:      docker.io/rancher/mirrored-coredns-coredns@sha256:a11fafae1f8037cbbd66c5afa40ba2423936b72b4fd50a7034a7e8b955163594
    Ports:         53/UDP, 53/TCP, 9153/TCP
    Host Ports:    0/UDP, 0/TCP, 0/TCP
    Args:
      -conf
      /etc/coredns/Corefile
    State:          Running
      Started:      Sun, 24 Dec 2023 12:08:04 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      memory:  170Mi
    Requests:
      cpu:        100m
      memory:     70Mi
    Liveness:     http-get http://:8080/health delay=60s timeout=1s period=10s #success=1 #failure=3
    Readiness:    http-get http://:8181/ready delay=0s timeout=1s period=2s #success=1 #failure=3
    Environment:  <none>
    Mounts:
      /etc/coredns from config-volume (ro)
      /etc/coredns/custom from custom-config-volume (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-zt7s7 (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  config-volume:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      coredns
    Optional:  false
  custom-config-volume:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      coredns-custom
    Optional:  true
  kube-api-access-zt7s7:
    Type:                     Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:   3607
    ConfigMapName:            kube-root-ca.crt
    ConfigMapOptional:        <nil>
    DownwardAPI:              true
QoS Class:                    Burstable
Node-Selectors:               kubernetes.io/os=linux
Tolerations:                  CriticalAddonsOnly op=Exists
                              node-role.kubernetes.io/control-plane:NoSchedule op=Exists
                              node-role.kubernetes.io/master:NoSchedule op=Exists
                              node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                              node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Topology Spread Constraints:  kubernetes.io/hostname:DoNotSchedule when max skew 1 is exceeded for selector k8s-app=kube-dns
Events:
  Type    Reason          Age   From               Message
  ----    ------          ----  ----               -------
  Normal  Scheduled       23m   default-scheduler  Successfully assigned kube-system/coredns-59b4f5bbd5-pcw68 to ix-truenas
  Normal  AddedInterface  23m   multus             Add eth0 [172.16.0.240/16] from ix-net
  Normal  Pulled          23m   kubelet            Container image "rancher/mirrored-coredns-coredns:1.10.1" already present on machine
  Normal  Created         23m   kubelet            Created container coredns
  Normal  Started         23m   kubelet            Started container coredns


Name:                 snapshot-controller-546868dfb4-x7596
Namespace:            kube-system
Priority:             2000000000
Priority Class Name:  system-cluster-critical
Service Account:      snapshot-controller
Node:                 ix-truenas/192.168.1.3
Start Time:           Sun, 24 Dec 2023 12:07:53 -0500
Labels:               app=snapshot-controller
                      pod-template-hash=546868dfb4
Annotations:          k8s.v1.cni.cncf.io/network-status:
                        [{
                            "name": "ix-net",
                            "interface": "eth0",
                            "ips": [
                                "172.16.0.238"
                            ],
                            "mac": "42:83:61:67:18:c7",
                            "default": true,
                            "dns": {},
                            "gateway": [
                                "172.16.0.1"
                            ]
                        }]
Status:               Running
IP:                   172.16.0.238
IPs:
  IP:           172.16.0.238
Controlled By:  ReplicaSet/snapshot-controller-546868dfb4
Containers:
  snapshot-controller:
    Container ID:  containerd://b3b3522bace3e844e19f3539fc4df4f5ada5744f444fefca4e45efd5f94ffc03
    Image:         registry.k8s.io/sig-storage/snapshot-controller:v6.2.2
    Image ID:      registry.k8s.io/sig-storage/snapshot-controller@sha256:fb95b65bb88f319f0f7d5397c401a654164f11a191f466b4026fa36085c7141b
    Port:          <none>
    Host Port:     <none>
    Args:
      --v=2
      --leader-election=true
      --leader-election-namespace=kube-system
    State:          Running
      Started:      Sun, 24 Dec 2023 12:08:01 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      memory:  300Mi
    Requests:
      cpu:        10m
      memory:     20Mi
    Environment:  <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-5q5k4 (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  kube-api-access-5q5k4:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   Burstable
Node-Selectors:              kubernetes.io/os=linux
Tolerations:                 node-role.kubernetes.io/control-plane=true:NoSchedule
                             node-role.kubernetes.io/controlplane=true:NoSchedule
                             node-role.kubernetes.io/master=true:NoSchedule
                             node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type    Reason          Age   From               Message
  ----    ------          ----  ----               -------
  Normal  Scheduled       23m   default-scheduler  Successfully assigned kube-system/snapshot-controller-546868dfb4-x7596 to ix-truenas
  Normal  AddedInterface  23m   multus             Add eth0 [172.16.0.238/16] from ix-net
  Normal  Pulled          23m   kubelet            Container image "registry.k8s.io/sig-storage/snapshot-controller:v6.2.2" already present on machine
  Normal  Created         23m   kubelet            Created container snapshot-controller
  Normal  Started         23m   kubelet            Started container snapshot-controller


Name:                 csi-smb-controller-7fbbb8fb6f-494xd
Namespace:            kube-system
Priority:             2000000000
Priority Class Name:  system-cluster-critical
Service Account:      csi-smb-controller-sa
Node:                 ix-truenas/192.168.1.3
Start Time:           Sun, 24 Dec 2023 12:07:53 -0500
Labels:               app=csi-smb-controller
                      pod-template-hash=7fbbb8fb6f
Annotations:          <none>
Status:               Running
IP:                   192.168.1.3
IPs:
  IP:           192.168.1.3
Controlled By:  ReplicaSet/csi-smb-controller-7fbbb8fb6f
Containers:
  csi-provisioner:
    Container ID:  containerd://3b785eac72ab636126641f101fda305fc460e1d11f4a3b60de897ce1f9ddc58c
    Image:         registry.k8s.io/sig-storage/csi-provisioner:v3.5.0
    Image ID:      registry.k8s.io/sig-storage/csi-provisioner@sha256:d078dc174323407e8cc6f0f9abd4efaac5db27838f1564d0253d5e3233e3f17f
    Port:          <none>
    Host Port:     <none>
    Args:
      -v=2
      --csi-address=$(ADDRESS)
      --leader-election
      --leader-election-namespace=kube-system
      --extra-create-metadata=true
    State:          Running
      Started:      Sun, 24 Dec 2023 12:08:00 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     1
      memory:  300Mi
    Requests:
      cpu:     10m
      memory:  20Mi
    Environment:
      ADDRESS:  /csi/csi.sock
    Mounts:
      /csi from socket-dir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-jwg92 (ro)
  liveness-probe:
    Container ID:  containerd://5c63b5fefb414d5f3724717f620dbda7a3fe1450a57cec61b7839db3e1106cb2
    Image:         registry.k8s.io/sig-storage/livenessprobe:v2.10.0
    Image ID:      registry.k8s.io/sig-storage/livenessprobe@sha256:4dc0b87ccd69f9865b89234d8555d3a614ab0a16ed94a3016ffd27f8106132ce
    Port:          <none>
    Host Port:     <none>
    Args:
      --csi-address=/csi/csi.sock
      --probe-timeout=3s
      --health-port=29642
      --v=2
    State:          Running
      Started:      Sun, 24 Dec 2023 12:08:00 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     1
      memory:  100Mi
    Requests:
      cpu:        10m
      memory:     20Mi
    Environment:  <none>
    Mounts:
      /csi from socket-dir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-jwg92 (ro)
  smb:
    Container ID:  containerd://dd67b6d14a1f85ed801e2a2baa356bb2dd2b474bde0734acc843a54b7d1285c1
    Image:         registry.k8s.io/sig-storage/smbplugin:v1.11.0
    Image ID:      registry.k8s.io/sig-storage/smbplugin@sha256:65f664c6e9cce565805b13fb82b40018d56e39f41616134f572c52f3964c6ee4
    Ports:         29642/TCP, 29644/TCP
    Host Ports:    29642/TCP, 29644/TCP
    Args:
      --v=5
      --endpoint=$(CSI_ENDPOINT)
      --metrics-address=0.0.0.0:29644
    State:          Running
      Started:      Sun, 24 Dec 2023 12:08:01 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      memory:  200Mi
    Requests:
      cpu:     10m
      memory:  20Mi
    Liveness:  http-get http://:healthz/healthz delay=30s timeout=10s period=30s #success=1 #failure=5
    Environment:
      CSI_ENDPOINT:  unix:///csi/csi.sock
    Mounts:
      /csi from socket-dir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-jwg92 (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  socket-dir:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
  kube-api-access-jwg92:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   Burstable
Node-Selectors:              kubernetes.io/os=linux
Tolerations:                 node-role.kubernetes.io/control-plane:NoSchedule op=Exists
                             node-role.kubernetes.io/controlplane:NoSchedule op=Exists
                             node-role.kubernetes.io/master:NoSchedule op=Exists
                             node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason            Age   From               Message
  ----     ------            ----  ----               -------
  Warning  FailedScheduling  24m   default-scheduler  0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling..
  Warning  FailedScheduling  23m   default-scheduler  0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling..
  Normal   Scheduled         23m   default-scheduler  Successfully assigned kube-system/csi-smb-controller-7fbbb8fb6f-494xd to ix-truenas
  Normal   Pulled            23m   kubelet            Container image "registry.k8s.io/sig-storage/csi-provisioner:v3.5.0" already present on machine
  Normal   Created           23m   kubelet            Created container csi-provisioner
  Normal   Started           23m   kubelet            Started container csi-provisioner
  Normal   Pulled            23m   kubelet            Container image "registry.k8s.io/sig-storage/livenessprobe:v2.10.0" already present on machine
  Normal   Created           23m   kubelet            Created container liveness-probe
  Normal   Started           23m   kubelet            Started container liveness-probe
  Normal   Pulled            23m   kubelet            Container image "registry.k8s.io/sig-storage/smbplugin:v1.11.0" already present on machine
  Normal   Created           23m   kubelet            Created container smb
  Normal   Started           23m   kubelet            Started container smb


Name:                 openebs-zfs-node-frcqf
Namespace:            kube-system
Priority:             900001000
Priority Class Name:  openebs-zfs-csi-node-critical
Service Account:      openebs-zfs-node-sa
Node:                 ix-truenas/192.168.1.3
Start Time:           Sun, 24 Dec 2023 12:07:53 -0500
Labels:               app=openebs-zfs-node
                      controller-revision-hash=6456c99fd9
                      openebs.io/component-name=openebs-zfs-node
                      openebs.io/version=2.3.0
                      pod-template-generation=2
                      role=openebs-zfs
Annotations:          <none>
Status:               Running
IP:                   192.168.1.3
IPs:
  IP:           192.168.1.3
Controlled By:  DaemonSet/openebs-zfs-node
Containers:
  csi-node-driver-registrar:
    Container ID:  containerd://40882f1af010a984ee0182301634d24a7cea4cb0159db551be907808cc5c5433
    Image:         registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0
    Image ID:      registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:f6717ce72a2615c7fbc746b4068f788e78579c54c43b8716e5ce650d97af2df1
    Port:          <none>
    Host Port:     <none>
    Args:
      --v=5
      --csi-address=$(ADDRESS)
      --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
    State:          Running
      Started:      Sun, 24 Dec 2023 12:08:01 -0500
    Ready:          True
    Restart Count:  0
    Environment:
      ADDRESS:               /plugin/csi.sock
      DRIVER_REG_SOCK_PATH:  /var/lib/kubelet/plugins/zfs-localpv/csi.sock
      KUBE_NODE_NAME:         (v1:spec.nodeName)
      NODE_DRIVER:           openebs-zfs
    Mounts:
      /plugin from plugin-dir (rw)
      /registration from registration-dir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-wkck8 (ro)
  openebs-zfs-plugin:
    Container ID:  containerd://ce49d79b9696055a669874bc3ac7c1537c45e9be4acc12bbee5a06f073cc7b17
    Image:         openebs/zfs-driver:2.3.0
    Image ID:      docker.io/openebs/zfs-driver@sha256:42b8d31edc7cdf4116ecd6369f89a1d477bb9b64e695d074d290789af1a461dd
    Port:          <none>
    Host Port:     <none>
    Args:
      --nodename=$(OPENEBS_NODE_NAME)
      --endpoint=$(OPENEBS_CSI_ENDPOINT)
      --plugin=$(OPENEBS_NODE_DRIVER)
    State:          Running
      Started:      Sun, 24 Dec 2023 12:08:02 -0500
    Ready:          True
    Restart Count:  0
    Environment:
      OPENEBS_NODE_NAME:      (v1:spec.nodeName)
      OPENEBS_CSI_ENDPOINT:  unix:///plugin/csi.sock
      OPENEBS_NODE_DRIVER:   agent
      OPENEBS_NAMESPACE:     openebs
      ALLOWED_TOPOLOGIES:    All
    Mounts:
      /dev from device-dir (rw)
      /home/keys from encr-keys (rw)
      /host from host-root (ro)
      /plugin from plugin-dir (rw)
      /sbin/zfs from chroot-zfs (rw,path="zfs")
      /var/lib/kubelet/ from pods-mount-dir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-wkck8 (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  device-dir:
    Type:          HostPath (bare host directory volume)
    Path:          /dev
    HostPathType:  Directory
  encr-keys:
    Type:          HostPath (bare host directory volume)
    Path:          /home/keys
    HostPathType:  DirectoryOrCreate
  chroot-zfs:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      openebs-zfspv-bin
    Optional:  false
  host-root:
    Type:          HostPath (bare host directory volume)
    Path:          /
    HostPathType:  Directory
  registration-dir:
    Type:          HostPath (bare host directory volume)
    Path:          /var/lib/kubelet/plugins_registry/
    HostPathType:  DirectoryOrCreate
  plugin-dir:
    Type:          HostPath (bare host directory volume)
    Path:          /var/lib/kubelet/plugins/zfs-localpv/
    HostPathType:  DirectoryOrCreate
  pods-mount-dir:
    Type:          HostPath (bare host directory volume)
    Path:          /var/lib/kubelet/
    HostPathType:  Directory
  kube-api-access-wkck8:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/disk-pressure:NoSchedule op=Exists
                             node.kubernetes.io/memory-pressure:NoSchedule op=Exists
                             node.kubernetes.io/network-unavailable:NoSchedule op=Exists
                             node.kubernetes.io/not-ready:NoExecute op=Exists
                             node.kubernetes.io/pid-pressure:NoSchedule op=Exists
                             node.kubernetes.io/unreachable:NoExecute op=Exists
                             node.kubernetes.io/unschedulable:NoSchedule op=Exists
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  23m   default-scheduler  Successfully assigned kube-system/openebs-zfs-node-frcqf to ix-truenas
  Normal  Pulled     23m   kubelet            Container image "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0" already present on machine
  Normal  Created    23m   kubelet            Created container csi-node-driver-registrar
  Normal  Started    23m   kubelet            Started container csi-node-driver-registrar
  Normal  Pulled     23m   kubelet            Container image "openebs/zfs-driver:2.3.0" already present on machine
  Normal  Created    23m   kubelet            Created container openebs-zfs-plugin
  Normal  Started    23m   kubelet            Started container openebs-zfs-plugin


Name:                 snapshot-controller-546868dfb4-bkwwt
Namespace:            kube-system
Priority:             2000000000
Priority Class Name:  system-cluster-critical
Service Account:      snapshot-controller
Node:                 ix-truenas/192.168.1.3
Start Time:           Sun, 24 Dec 2023 12:07:53 -0500
Labels:               app=snapshot-controller
                      pod-template-hash=546868dfb4
Annotations:          k8s.v1.cni.cncf.io/network-status:
                        [{
                            "name": "ix-net",
                            "interface": "eth0",
                            "ips": [
                                "172.16.0.239"
                            ],
                            "mac": "3a:4f:7c:79:14:f4",
                            "default": true,
                            "dns": {},
                            "gateway": [
                                "172.16.0.1"
                            ]
                        }]
Status:               Running
IP:                   172.16.0.239
IPs:
  IP:           172.16.0.239
Controlled By:  ReplicaSet/snapshot-controller-546868dfb4
Containers:
  snapshot-controller:
    Container ID:  containerd://fcbed78ba3efe9736036f851d4f0c4b5457ee6552a5747926c167a38d08e6035
    Image:         registry.k8s.io/sig-storage/snapshot-controller:v6.2.2
    Image ID:      registry.k8s.io/sig-storage/snapshot-controller@sha256:fb95b65bb88f319f0f7d5397c401a654164f11a191f466b4026fa36085c7141b
    Port:          <none>
    Host Port:     <none>
    Args:
      --v=2
      --leader-election=true
      --leader-election-namespace=kube-system
    State:          Running
      Started:      Sun, 24 Dec 2023 12:08:01 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      memory:  300Mi
    Requests:
      cpu:        10m
      memory:     20Mi
    Environment:  <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-txl4g (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  kube-api-access-txl4g:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   Burstable
Node-Selectors:              kubernetes.io/os=linux
Tolerations:                 node-role.kubernetes.io/control-plane=true:NoSchedule
                             node-role.kubernetes.io/controlplane=true:NoSchedule
                             node-role.kubernetes.io/master=true:NoSchedule
                             node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type    Reason          Age   From               Message
  ----    ------          ----  ----               -------
  Normal  Scheduled       23m   default-scheduler  Successfully assigned kube-system/snapshot-controller-546868dfb4-bkwwt to ix-truenas
  Normal  AddedInterface  23m   multus             Add eth0 [172.16.0.239/16] from ix-net
  Normal  Pulled          23m   kubelet            Container image "registry.k8s.io/sig-storage/snapshot-controller:v6.2.2" already present on machine
  Normal  Created         23m   kubelet            Created container snapshot-controller
  Normal  Started         23m   kubelet            Started container snapshot-controller


Name:                 csi-nfs-controller-7b74694749-v2fqb
Namespace:            kube-system
Priority:             2000000000
Priority Class Name:  system-cluster-critical
Service Account:      csi-nfs-controller-sa
Node:                 ix-truenas/192.168.1.3
Start Time:           Sun, 24 Dec 2023 12:07:53 -0500
Labels:               app=csi-nfs-controller
                      pod-template-hash=7b74694749
Annotations:          <none>
Status:               Running
IP:                   192.168.1.3
IPs:
  IP:           192.168.1.3
Controlled By:  ReplicaSet/csi-nfs-controller-7b74694749
Containers:
  csi-provisioner:
    Container ID:  containerd://e26f0ffa615a2833909bbe1e918a8c8439605fc75a79931e531b76908d619044
    Image:         registry.k8s.io/sig-storage/csi-provisioner:v3.5.0
    Image ID:      registry.k8s.io/sig-storage/csi-provisioner@sha256:d078dc174323407e8cc6f0f9abd4efaac5db27838f1564d0253d5e3233e3f17f
    Port:          <none>
    Host Port:     <none>
    Args:
      -v=2
      --csi-address=$(ADDRESS)
      --leader-election
      --leader-election-namespace=kube-system
      --extra-create-metadata=true
    State:          Running
      Started:      Sun, 24 Dec 2023 12:07:57 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      memory:  400Mi
    Requests:
      cpu:     10m
      memory:  20Mi
    Environment:
      ADDRESS:  /csi/csi.sock
    Mounts:
      /csi from socket-dir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-dpdf6 (ro)
  csi-snapshotter:
    Container ID:  containerd://a64a3c8c68c8f6d9118c3f0dc74703196a75f85f9e92c2039b4be7dfdd483897
    Image:         registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2
    Image ID:      registry.k8s.io/sig-storage/csi-snapshotter@sha256:becc53e25b96573f61f7469923a92fb3e9d3a3781732159954ce0d9da07233a2
    Port:          <none>
    Host Port:     <none>
    Args:
      --v=2
      --csi-address=$(ADDRESS)
      --leader-election-namespace=kube-system
      --leader-election
    State:          Running
      Started:      Sun, 24 Dec 2023 12:07:57 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      memory:  200Mi
    Requests:
      cpu:     10m
      memory:  20Mi
    Environment:
      ADDRESS:  /csi/csi.sock
    Mounts:
      /csi from socket-dir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-dpdf6 (ro)
  liveness-probe:
    Container ID:  containerd://ade3820914b78bf6c17ad70dc95d400606007fac07429ee329bde3a92a2c065b
    Image:         registry.k8s.io/sig-storage/livenessprobe:v2.10.0
    Image ID:      registry.k8s.io/sig-storage/livenessprobe@sha256:4dc0b87ccd69f9865b89234d8555d3a614ab0a16ed94a3016ffd27f8106132ce
    Port:          <none>
    Host Port:     <none>
    Args:
      --csi-address=/csi/csi.sock
      --probe-timeout=3s
      --health-port=29652
      --v=2
    State:          Running
      Started:      Sun, 24 Dec 2023 12:07:58 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      memory:  100Mi
    Requests:
      cpu:        10m
      memory:     20Mi
    Environment:  <none>
    Mounts:
      /csi from socket-dir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-dpdf6 (ro)
  nfs:
    Container ID:  containerd://de032828333d4795febe17f5551c194f5d2e56cb256258ab2e8c875acc5d9c8b
    Image:         registry.k8s.io/sig-storage/nfsplugin:v4.4.0
    Image ID:      registry.k8s.io/sig-storage/nfsplugin@sha256:971cb526bc0108cca7d8efda32102629cb362f99bbd4879fd381bea1b2014ab4
    Port:          29652/TCP
    Host Port:     29652/TCP
    Args:
      -v=5
      --nodeid=$(NODE_ID)
      --endpoint=$(CSI_ENDPOINT)
    State:          Running
      Started:      Sun, 24 Dec 2023 12:07:58 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      memory:  200Mi
    Requests:
      cpu:     10m
      memory:  20Mi
    Liveness:  http-get http://:healthz/healthz delay=30s timeout=10s period=30s #success=1 #failure=5
    Environment:
      NODE_ID:        (v1:spec.nodeName)
      CSI_ENDPOINT:  unix:///csi/csi.sock
    Mounts:
      /csi from socket-dir (rw)
      /var/lib/kubelet/pods from pods-mount-dir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-dpdf6 (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  pods-mount-dir:
    Type:          HostPath (bare host directory volume)
    Path:          /var/lib/kubelet/pods
    HostPathType:  Directory
  socket-dir:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
  kube-api-access-dpdf6:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   Burstable
Node-Selectors:              kubernetes.io/os=linux
Tolerations:                 node-role.kubernetes.io/control-plane:NoSchedule op=Exists
                             node-role.kubernetes.io/controlplane:NoSchedule op=Exists
                             node-role.kubernetes.io/master:NoSchedule op=Exists
                             node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason            Age   From               Message
  ----     ------            ----  ----               -------
  Warning  FailedScheduling  24m   default-scheduler  0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling..
  Warning  FailedScheduling  23m   default-scheduler  0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling..
  Normal   Scheduled         23m   default-scheduler  Successfully assigned kube-system/csi-nfs-controller-7b74694749-v2fqb to ix-truenas
  Normal   Pulled            23m   kubelet            Container image "registry.k8s.io/sig-storage/csi-provisioner:v3.5.0" already present on machine
  Normal   Created           23m   kubelet            Created container csi-provisioner
  Normal   Started           23m   kubelet            Started container csi-provisioner
  Normal   Pulled            23m   kubelet            Container image "registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2" already present on machine
  Normal   Created           23m   kubelet            Created container csi-snapshotter
  Normal   Started           23m   kubelet            Started container csi-snapshotter
  Normal   Pulled            23m   kubelet            Container image "registry.k8s.io/sig-storage/livenessprobe:v2.10.0" already present on machine
  Normal   Created           23m   kubelet            Created container liveness-probe
  Normal   Started           23m   kubelet            Started container liveness-probe
  Normal   Pulled            23m   kubelet            Container image "registry.k8s.io/sig-storage/nfsplugin:v4.4.0" already present on machine
  Normal   Created           23m   kubelet            Created container nfs
  Normal   Started           23m   kubelet            Started container nfs


Name:                 openebs-zfs-controller-0
Namespace:            kube-system
Priority:             900000000
Priority Class Name:  openebs-zfs-csi-controller-critical
Service Account:      openebs-zfs-controller-sa
Node:                 ix-truenas/192.168.1.3
Start Time:           Sun, 24 Dec 2023 12:07:53 -0500
Labels:               app=openebs-zfs-controller
                      controller-revision-hash=openebs-zfs-controller-77d96d5597
                      openebs.io/component-name=openebs-zfs-controller
                      openebs.io/version=2.3.0
                      role=openebs-zfs
                      statefulset.kubernetes.io/pod-name=openebs-zfs-controller-0
Annotations:          k8s.v1.cni.cncf.io/network-status:
                        [{
                            "name": "ix-net",
                            "interface": "eth0",
                            "ips": [
                                "172.16.0.236"
                            ],
                            "mac": "82:31:37:41:20:7b",
                            "default": true,
                            "dns": {},
                            "gateway": [
                                "172.16.0.1"
                            ]
                        }]
Status:               Running
IP:                   172.16.0.236
IPs:
  IP:           172.16.0.236
Controlled By:  StatefulSet/openebs-zfs-controller
Containers:
  csi-resizer:
    Container ID:  containerd://4ac01dd1ca7a174ae909b346e593228f1efbc70e63f44d74734088c8cddff373
    Image:         registry.k8s.io/sig-storage/csi-resizer:v1.8.0
    Image ID:      registry.k8s.io/sig-storage/csi-resizer@sha256:2e2b44393539d744a55b9370b346e8ebd95a77573064f3f9a8caf18c22f4d0d0
    Port:          <none>
    Host Port:     <none>
    Args:
      --v=5
      --csi-address=$(ADDRESS)
      --leader-election
    State:          Running
      Started:      Sun, 24 Dec 2023 12:08:00 -0500
    Ready:          True
    Restart Count:  0
    Environment:
      ADDRESS:  /var/lib/csi/sockets/pluginproxy/csi.sock
    Mounts:
      /var/lib/csi/sockets/pluginproxy/ from socket-dir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-8qw2g (ro)
  csi-snapshotter:
    Container ID:  containerd://78d70f7e62a68ccfc204795188e244acf4bf468f2bd50057e26ee3b62fd0be31
    Image:         registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2
    Image ID:      registry.k8s.io/sig-storage/csi-snapshotter@sha256:becc53e25b96573f61f7469923a92fb3e9d3a3781732159954ce0d9da07233a2
    Port:          <none>
    Host Port:     <none>
    Args:
      --csi-address=$(ADDRESS)
      --leader-election
    State:          Running
      Started:      Sun, 24 Dec 2023 12:08:00 -0500
    Ready:          True
    Restart Count:  0
    Environment:
      ADDRESS:  /var/lib/csi/sockets/pluginproxy/csi.sock
    Mounts:
      /var/lib/csi/sockets/pluginproxy/ from socket-dir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-8qw2g (ro)
  snapshot-controller:
    Container ID:  containerd://f5477abdf232447b0251a191188485737186204eb61416958f7ccdcca6ba3c4a
    Image:         registry.k8s.io/sig-storage/snapshot-controller:v6.2.2
    Image ID:      registry.k8s.io/sig-storage/snapshot-controller@sha256:fb95b65bb88f319f0f7d5397c401a654164f11a191f466b4026fa36085c7141b
    Port:          <none>
    Host Port:     <none>
    Args:
      --v=5
      --leader-election=true
    State:          Running
      Started:      Sun, 24 Dec 2023 12:08:01 -0500
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-8qw2g (ro)
  csi-provisioner:
    Container ID:  containerd://251d90a0697440218653d2835dbba21809feec261fce6fc2c92ebf916cfa9edc
    Image:         registry.k8s.io/sig-storage/csi-provisioner:v3.5.0
    Image ID:      registry.k8s.io/sig-storage/csi-provisioner@sha256:d078dc174323407e8cc6f0f9abd4efaac5db27838f1564d0253d5e3233e3f17f
    Port:          <none>
    Host Port:     <none>
    Args:
      --csi-address=$(ADDRESS)
      --v=5
      --feature-gates=Topology=true
      --strict-topology
      --leader-election
      --extra-create-metadata=true
      --enable-capacity=true
      --default-fstype=ext4
    State:          Running
      Started:      Sun, 24 Dec 2023 12:08:01 -0500
    Ready:          True
    Restart Count:  0
    Environment:
      ADDRESS:    /var/lib/csi/sockets/pluginproxy/csi.sock
      NAMESPACE:  kube-system (v1:metadata.namespace)
      POD_NAME:   openebs-zfs-controller-0 (v1:metadata.name)
    Mounts:
      /var/lib/csi/sockets/pluginproxy/ from socket-dir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-8qw2g (ro)
  openebs-zfs-plugin:
    Container ID:  containerd://9e5cc15934b9244e3f4ca2d892ea65f2db6d5334da6cd62d2b5454821ff167b6
    Image:         openebs/zfs-driver:2.3.0
    Image ID:      docker.io/openebs/zfs-driver@sha256:42b8d31edc7cdf4116ecd6369f89a1d477bb9b64e695d074d290789af1a461dd
    Port:          <none>
    Host Port:     <none>
    Args:
      --endpoint=$(OPENEBS_CSI_ENDPOINT)
      --plugin=$(OPENEBS_CONTROLLER_DRIVER)
    State:          Running
      Started:      Sun, 24 Dec 2023 12:08:01 -0500
    Ready:          True
    Restart Count:  0
    Environment:
      OPENEBS_CONTROLLER_DRIVER:    controller
      OPENEBS_CSI_ENDPOINT:         unix:///var/lib/csi/sockets/pluginproxy/csi.sock
      OPENEBS_NAMESPACE:            openebs
      OPENEBS_IO_INSTALLER_TYPE:    zfs-operator
      OPENEBS_IO_ENABLE_ANALYTICS:  true
    Mounts:
      /var/lib/csi/sockets/pluginproxy/ from socket-dir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-8qw2g (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  socket-dir:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
  kube-api-access-8qw2g:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason            Age   From               Message
  ----     ------            ----  ----               -------
  Warning  FailedScheduling  24m   default-scheduler  0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling..
  Warning  FailedScheduling  23m   default-scheduler  0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling..
  Normal   Scheduled         23m   default-scheduler  Successfully assigned kube-system/openebs-zfs-controller-0 to ix-truenas
  Normal   AddedInterface    23m   multus             Add eth0 [172.16.0.236/16] from ix-net
  Normal   Pulled            23m   kubelet            Container image "registry.k8s.io/sig-storage/csi-resizer:v1.8.0" already present on machine
  Normal   Created           23m   kubelet            Created container csi-resizer
  Normal   Started           23m   kubelet            Started container csi-resizer
  Normal   Pulled            23m   kubelet            Container image "registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2" already present on machine
  Normal   Created           23m   kubelet            Created container csi-snapshotter
  Normal   Started           23m   kubelet            Started container csi-snapshotter
  Normal   Pulled            23m   kubelet            Container image "registry.k8s.io/sig-storage/snapshot-controller:v6.2.2" already present on machine
  Normal   Created           23m   kubelet            Created container snapshot-controller
  Normal   Started           23m   kubelet            Started container snapshot-controller
  Normal   Pulled            23m   kubelet            Container image "registry.k8s.io/sig-storage/csi-provisioner:v3.5.0" already present on machine
  Normal   Created           23m   kubelet            Created container csi-provisioner
  Normal   Started           23m   kubelet            Started container csi-provisioner
  Normal   Pulled            23m   kubelet            Container image "openebs/zfs-driver:2.3.0" already present on machine
  Normal   Created           23m   kubelet            Created container openebs-zfs-plugin
  Normal   Started           23m   kubelet            Started container openebs-zfs-plugin


Name:                 nvidia-device-plugin-daemonset-pjd4w
Namespace:            kube-system
Priority:             2000001000
Priority Class Name:  system-node-critical
Runtime Class Name:   nvidia
Service Account:      default
Node:                 ix-truenas/192.168.1.3
Start Time:           Sun, 24 Dec 2023 12:07:53 -0500
Labels:               controller-revision-hash=959889769
                      name=nvidia-device-plugin-ds
                      pod-template-generation=2
Annotations:          k8s.v1.cni.cncf.io/network-status:
                        [{
                            "name": "ix-net",
                            "interface": "eth0",
                            "ips": [
                                "172.16.0.237"
                            ],
                            "mac": "46:0f:c3:f5:b9:48",
                            "default": true,
                            "dns": {},
                            "gateway": [
                                "172.16.0.1"
                            ]
                        }]
                      scheduler.alpha.kubernetes.io/critical-pod: 
Status:               Running
IP:                   172.16.0.237
IPs:
  IP:           172.16.0.237
Controlled By:  DaemonSet/nvidia-device-plugin-daemonset
Containers:
  nvidia-device-plugin-ctr:
    Container ID:  containerd://0d474da2f5e6cb3a07a4fc3739a2c047aca41779309c50e94ccc832fa616dbcd
    Image:         nvcr.io/nvidia/k8s-device-plugin:v0.13.0
    Image ID:      nvcr.io/nvidia/k8s-device-plugin@sha256:e8343db286ac349f213d7b84e65c0d559d6310e74446986a09b66b21913eef12
    Port:          <none>
    Host Port:     <none>
    Command:
      nvidia-device-plugin
      --config-file
      /etc/config/nvdefault.yaml
    State:          Running
      Started:      Sun, 24 Dec 2023 12:08:03 -0500
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /etc/config from plugin-config (rw)
      /var/lib/kubelet/device-plugins from device-plugin (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-fqtnh (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  device-plugin:
    Type:          HostPath (bare host directory volume)
    Path:          /var/lib/kubelet/device-plugins
    HostPathType:  
  plugin-config:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      nvidia-device-plugin-config
    Optional:  false
  kube-api-access-fqtnh:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 CriticalAddonsOnly op=Exists
                             node.kubernetes.io/disk-pressure:NoSchedule op=Exists
                             node.kubernetes.io/memory-pressure:NoSchedule op=Exists
                             node.kubernetes.io/not-ready:NoExecute op=Exists
                             node.kubernetes.io/pid-pressure:NoSchedule op=Exists
                             node.kubernetes.io/unreachable:NoExecute op=Exists
                             node.kubernetes.io/unschedulable:NoSchedule op=Exists
                             nvidia.com/gpu:NoSchedule op=Exists
Events:
  Type    Reason          Age   From               Message
  ----    ------          ----  ----               -------
  Normal  Scheduled       23m   default-scheduler  Successfully assigned kube-system/nvidia-device-plugin-daemonset-pjd4w to ix-truenas
  Normal  AddedInterface  23m   multus             Add eth0 [172.16.0.237/16] from ix-net
  Normal  Pulled          23m   kubelet            Container image "nvcr.io/nvidia/k8s-device-plugin:v0.13.0" already present on machine
  Normal  Created         23m   kubelet            Created container nvidia-device-plugin-ctr
  Normal  Started         23m   kubelet            Started container nvidia-device-plugin-ctr


Name:             openspeedtest-677989cc6f-w4jdx
Namespace:        ix-openspeedtest
Priority:         0
Service Account:  default
Node:             ix-truenas/192.168.1.3
Start Time:       Sun, 24 Dec 2023 12:07:53 -0500
Labels:           app=openspeedtest-6.0.10
                  app.kubernetes.io/instance=openspeedtest
                  app.kubernetes.io/managed-by=Helm
                  app.kubernetes.io/name=openspeedtest
                  app.kubernetes.io/version=latest
                  helm-revision=15
                  helm.sh/chart=openspeedtest-6.0.10
                  pod-template-hash=677989cc6f
                  pod.name=main
                  release=openspeedtest
Annotations:      k8s.v1.cni.cncf.io/network-status:
                    [{
                        "name": "ix-net",
                        "interface": "eth0",
                        "ips": [
                            "172.16.0.235"
                        ],
                        "mac": "42:3a:0b:fd:b2:0f",
                        "default": true,
                        "dns": {},
                        "gateway": [
                            "172.16.0.1"
                        ]
                    }]
                  rollme: itHyW
Status:           Running
IP:               172.16.0.235
IPs:
  IP:           172.16.0.235
Controlled By:  ReplicaSet/openspeedtest-677989cc6f
Containers:
  openspeedtest:
    Container ID:   containerd://1981d47d77b4b922c3edde241bd65eb0b2bf6779c7c3290229433220cc5bdc19
    Image:          openspeedtest/latest:latest@sha256:948800e96e8369171407bba9607a05fe2a6c742fa0a038869ffcbb3ab2585c82
    Image ID:       docker.io/openspeedtest/latest@sha256:948800e96e8369171407bba9607a05fe2a6c742fa0a038869ffcbb3ab2585c82
    Port:           3000/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Sun, 24 Dec 2023 12:07:57 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     4
      memory:  8Gi
    Requests:
      cpu:      10m
      memory:   50Mi
    Liveness:   http-get http://:3000/ delay=10s timeout=5s period=10s #success=1 #failure=5
    Readiness:  http-get http://:3000/ delay=10s timeout=5s period=10s #success=2 #failure=5
    Startup:    tcp-socket :3000 delay=10s timeout=2s period=5s #success=1 #failure=60
    Environment:
      TZ:                      America/New_York
      UMASK:                   0022
      UMASK_SET:               0022
      NVIDIA_VISIBLE_DEVICES:  void
      PUID:                    568
      USER_ID:                 568
      UID:                     568
      PGID:                    568
      GROUP_ID:                568
      GID:                     568
    Mounts:
      /dev/shm from devshm (rw)
      /shared from shared (rw)
      /tmp from tmp (rw)
      /var/logs from varlogs (rw)
      /var/run from varrun (rw)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  devshm:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
  shared:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
  tmp:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
  varlogs:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
  varrun:
    Type:                     EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:                   Memory
    SizeLimit:                8Gi
QoS Class:                    Burstable
Node-Selectors:               kubernetes.io/arch=amd64
Tolerations:                  node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                              node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Topology Spread Constraints:  kubernetes.io/hostname:ScheduleAnyway when max skew 1 is exceeded for selector app.kubernetes.io/instance=openspeedtest,app.kubernetes.io/name=openspeedtest,pod.name=openspeedtest
                              truecharts.org/rack:ScheduleAnyway when max skew 1 is exceeded for selector app.kubernetes.io/instance=openspeedtest,app.kubernetes.io/name=openspeedtest,pod.name=openspeedtest
Events:
  Type     Reason            Age   From               Message
  ----     ------            ----  ----               -------
  Warning  FailedScheduling  24m   default-scheduler  0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling..
  Warning  FailedScheduling  23m   default-scheduler  0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling..
  Normal   Scheduled         23m   default-scheduler  Successfully assigned ix-openspeedtest/openspeedtest-677989cc6f-w4jdx to ix-truenas
  Normal   AddedInterface    23m   multus             Add eth0 [172.16.0.235/16] from ix-net
  Normal   Pulled            23m   kubelet            Container image "openspeedtest/latest:latest@sha256:948800e96e8369171407bba9607a05fe2a6c742fa0a038869ffcbb3ab2585c82" already present on machine
  Normal   Created           23m   kubelet            Created container openspeedtest
  Normal   Started           23m   kubelet            Started container openspeedtest


Name:             unifi-749498766b-g88d9
Namespace:        ix-unifi
Priority:         0
Service Account:  default
Node:             ix-truenas/192.168.1.3
Start Time:       Sun, 24 Dec 2023 12:07:53 -0500
Labels:           app=unifi-18.0.10
                  app.kubernetes.io/instance=unifi
                  app.kubernetes.io/managed-by=Helm
                  app.kubernetes.io/name=unifi
                  app.kubernetes.io/version=8.0.7
                  helm-revision=12
                  helm.sh/chart=unifi-18.0.10
                  pod-template-hash=749498766b
                  pod.name=main
                  release=unifi
Annotations:      rollme: O4PII
Status:           Running
IP:               192.168.1.3
IPs:
  IP:           192.168.1.3
Controlled By:  ReplicaSet/unifi-749498766b
Init Containers:
  unifi-init-migrate:
    Container ID:  containerd://ea90df98e9be21d358de2540aed384b3221175cb12c308563d9458942b45ce2c
    Image:         tccr.io/truecharts/alpine:v3.18.4@sha256:51c4ca9a8213d3f4026bd560e8aa2de365d275d6cd0298eff2ae20671fef34e9
    Image ID:      tccr.io/truecharts/alpine@sha256:51c4ca9a8213d3f4026bd560e8aa2de365d275d6cd0298eff2ae20671fef34e9
    Port:          <none>
    Host Port:     <none>
    Command:
      /bin/sh
    Args:
      -c
      newdatadir="/usr/lib/unifi/data"
      olddatadir="/usr/lib/unifi/olddata/data"
      # Check the dir exists
      [ ! -d "$newdatadir" ] && echo "$newdatadir missing" && exit 1
      # Check if there is a data/data dir to migrate
      [ ! -d "$olddatadir" ] && echo "No $olddatadir dir found. Migration skipped" && exit 0
      
      # Check if the new data dir is empty, ignoring the old data dir
      dirs=$(ls -A "$newdatadir" | grep -v "data")
      if [ -z "$dirs" ]; then
        echo "New data dir is empty. Migrating data one level up"
        cp -rf "$olddatadir"/* "$newdatadir" || { echo "Failed to move data"; exit 1; }
        echo "Data migration complete"
      fi
      
    State:          Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Sun, 24 Dec 2023 12:08:05 -0500
      Finished:     Sun, 24 Dec 2023 12:08:05 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     4
      memory:  8Gi
    Requests:
      cpu:     10m
      memory:  50Mi
    Environment:
      TZ:                      America/New_York
      UMASK:                   0022
      UMASK_SET:               0022
      NVIDIA_VISIBLE_DEVICES:  void
      S6_READ_ONLY_ROOT:       1
    Mounts:
      /dev/shm from devshm (rw)
      /shared from shared (rw)
      /tmp from tmp (rw)
      /usr/lib/unifi/data from data (rw)
      /usr/lib/unifi/olddata from config (rw)
      /var/logs from varlogs (rw)
      /var/run from varrun (rw)
Containers:
  unifi:
    Container ID:   containerd://35489b13dad3bd924a14ae05cc0b2929e10764496c49791cb7fda6da6db8a587
    Image:          ghcr.io/goofball222/unifi:8.0.7@sha256:218565257fdb4b2c7f27b8e4fdb4925e9348fd7cfb3ed027974bebf78bdc45ba
    Image ID:       ghcr.io/goofball222/unifi@sha256:218565257fdb4b2c7f27b8e4fdb4925e9348fd7cfb3ed027974bebf78bdc45ba
    Ports:          8080/TCP, 8880/TCP, 8843/TCP, 8443/TCP, 6789/TCP, 3478/UDP
    Host Ports:     8080/TCP, 8880/TCP, 8843/TCP, 8443/TCP, 6789/TCP, 3478/UDP
    State:          Running
      Started:      Sun, 24 Dec 2023 12:08:06 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     4
      memory:  8Gi
    Requests:
      cpu:      10m
      memory:   50Mi
    Liveness:   http-get https://:8443/ delay=10s timeout=5s period=10s #success=1 #failure=5
    Readiness:  http-get https://:8443/ delay=10s timeout=5s period=10s #success=2 #failure=5
    Startup:    tcp-socket :8443 delay=10s timeout=2s period=5s #success=1 #failure=60
    Environment:
      TZ:                      America/New_York
      UMASK:                   0022
      UMASK_SET:               0022
      NVIDIA_VISIBLE_DEVICES:  void
      S6_READ_ONLY_ROOT:       1
      DB_MONGO_LOCAL:          true
      RUN_CHOWN:               true
    Mounts:
      /dev/shm from devshm (rw)
      /shared from shared (rw)
      /tmp from tmp (rw)
      /usr/lib/unifi/cert from certs (rw)
      /usr/lib/unifi/data from data (rw)
      /usr/lib/unifi/logs from logs (rw)
      /usr/lib/unifi/olddata from config (rw)
      /var/logs from varlogs (rw)
      /var/run from varrun (rw)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  certs:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  unifi-certs
    ReadOnly:   false
  config:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  unifi-config
    ReadOnly:   false
  data:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  unifi-data
    ReadOnly:   false
  devshm:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
  logs:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
  shared:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
  tmp:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
  varlogs:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
  varrun:
    Type:                     EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:                   Memory
    SizeLimit:                8Gi
QoS Class:                    Burstable
Node-Selectors:               kubernetes.io/arch=amd64
Tolerations:                  node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                              node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Topology Spread Constraints:  kubernetes.io/hostname:ScheduleAnyway when max skew 1 is exceeded for selector app.kubernetes.io/instance=unifi,app.kubernetes.io/name=unifi,pod.name=unifi
                              truecharts.org/rack:ScheduleAnyway when max skew 1 is exceeded for selector app.kubernetes.io/instance=unifi,app.kubernetes.io/name=unifi,pod.name=unifi
Events:
  Type     Reason            Age                 From               Message
  ----     ------            ----                ----               -------
  Warning  FailedScheduling  24m                 default-scheduler  0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling..
  Warning  FailedScheduling  23m                 default-scheduler  0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling..
  Normal   Scheduled         23m                 default-scheduler  Successfully assigned ix-unifi/unifi-749498766b-g88d9 to ix-truenas
  Warning  FailedMount       23m (x7 over 23m)   kubelet            MountVolume.SetUp failed for volume "pvc-2642dc33-3165-4abb-9550-a09e93b3764e" : kubernetes.io/csi: mounter.SetUpAt failed to get CSI client: driver name zfs.csi.openebs.io not found in the list of registered CSI drivers
  Warning  FailedMount       23m (x15 over 23m)  kubelet            MountVolume.SetUp failed for volume "pvc-02f02c5d-104e-4373-833c-c9efd538f1dd" : kubernetes.io/csi: mounter.SetUpAt failed to get CSI client: driver name zfs.csi.openebs.io not found in the list of registered CSI drivers
  Warning  FailedMount       23m (x3 over 23m)   kubelet            MountVolume.SetUp failed for volume "pvc-e391b34b-230f-42da-b4cf-7c111e22a515" : kubernetes.io/csi: mounter.SetUpAt failed to get CSI client: driver name zfs.csi.openebs.io not found in the list of registered CSI drivers


Name:             makemkv-859dc585d8-dxw8k
Namespace:        ix-makemkv
Priority:         0
Service Account:  default
Node:             ix-truenas/192.168.1.3
Start Time:       Sun, 24 Dec 2023 12:07:53 -0500
Labels:           app=makemkv-7.0.10
                  app.kubernetes.io/instance=makemkv
                  app.kubernetes.io/managed-by=Helm
                  app.kubernetes.io/name=makemkv
                  app.kubernetes.io/version=23.11.2
                  helm-revision=20
                  helm.sh/chart=makemkv-7.0.10
                  pod-template-hash=859dc585d8
                  pod.name=main
                  release=makemkv
Annotations:      k8s.v1.cni.cncf.io/network-status:
                    [{
                        "name": "ix-net",
                        "interface": "eth0",
                        "ips": [
                            "172.16.0.241"
                        ],
                        "mac": "ce:2e:67:30:6a:ef",
                        "default": true,
                        "dns": {},
                        "gateway": [
                            "172.16.0.1"
                        ]
                    }]
                  rollme: 3n1Fm
Status:           Running
IP:               172.16.0.241
IPs:
  IP:           172.16.0.241
Controlled By:  ReplicaSet/makemkv-859dc585d8
Containers:
  makemkv:
    Container ID:   containerd://525c23db188205f369aff109fcc41206a29c56748b41e66ff8f3bfd1663aa4b1
    Image:          jlesage/makemkv:v23.11.2@sha256:777c9cba43c8288e9e3d4ea7347ad427938b18bcf84b3b7b1bc69e7d9bbb5762
    Image ID:       docker.io/jlesage/makemkv@sha256:777c9cba43c8288e9e3d4ea7347ad427938b18bcf84b3b7b1bc69e7d9bbb5762
    Ports:          5800/TCP, 5900/TCP
    Host Ports:     0/TCP, 0/TCP
    State:          Running
      Started:      Sun, 24 Dec 2023 12:08:05 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     4
      memory:  8Gi
    Requests:
      cpu:      10m
      memory:   50Mi
    Liveness:   tcp-socket :5800 delay=10s timeout=5s period=10s #success=1 #failure=5
    Readiness:  tcp-socket :5800 delay=10s timeout=5s period=10s #success=2 #failure=5
    Startup:    tcp-socket :5800 delay=10s timeout=2s period=5s #success=1 #failure=60
    Environment Variables from:
      makemkv-makemkv  ConfigMap  Optional: false
    Environment:
      TZ:                      America/New_York
      UMASK:                   0022
      UMASK_SET:               0022
      NVIDIA_VISIBLE_DEVICES:  void
      PUID:                    568
      USER_ID:                 568
      UID:                     568
      PGID:                    568
      GROUP_ID:                568
      GID:                     568
      VNC_PASSWORD:            
    Mounts:
      /config from config (rw)
      /dev/shm from devshm (rw)
      /dev/sr0 from device-0 (rw)
      /mnt/rips from persist-list-0 (rw)
      /output from output (rw)
      /shared from shared (rw)
      /storage from storage (rw)
      /tmp from tmp (rw)
      /var/logs from varlogs (rw)
      /var/run from varrun (rw)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  config:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  makemkv-config
    ReadOnly:   false
  device-0:
    Type:          HostPath (bare host directory volume)
    Path:          /dev/sr0
    HostPathType:  
  devshm:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
  output:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  makemkv-output
    ReadOnly:   false
  persist-list-0:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/Plex/rips
    HostPathType:  
  shared:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
  storage:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  makemkv-storage
    ReadOnly:   false
  tmp:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
  varlogs:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
  varrun:
    Type:                     EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:                   Memory
    SizeLimit:                8Gi
QoS Class:                    Burstable
Node-Selectors:               kubernetes.io/arch=amd64
Tolerations:                  node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                              node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Topology Spread Constraints:  kubernetes.io/hostname:ScheduleAnyway when max skew 1 is exceeded for selector app.kubernetes.io/instance=makemkv,app.kubernetes.io/name=makemkv,pod.name=makemkv
                              truecharts.org/rack:ScheduleAnyway when max skew 1 is exceeded for selector app.kubernetes.io/instance=makemkv,app.kubernetes.io/name=makemkv,pod.name=makemkv
Events:
  Type     Reason            Age                 From               Message
  ----     ------            ----                ----               -------
  Warning  FailedScheduling  24m                 default-scheduler  0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling..
  Warning  FailedScheduling  23m                 default-scheduler  0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling..
  Normal   Scheduled         23m                 default-scheduler  Successfully assigned ix-makemkv/makemkv-859dc585d8-dxw8k to ix-truenas
  Warning  FailedMount       23m (x14 over 23m)  kubelet            MountVolume.MountDevice failed for volume "pvc-f8136318-5fc2-4ced-a232-ca4e9e32de6b" : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name zfs.csi.openebs.io not found in the list of registered CSI drivers
  Warning  FailedMount       23m (x9 over 23m)   kubelet            MountVolume.MountDevice failed for volume "pvc-d4bd0566-7fdc-4126-bddd-a8acba9ce2ed" : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name zfs.csi.openebs.io not found in the list of registered CSI drivers
  Warning  FailedMount       23m (x2 over 23m)   kubelet            MountVolume.MountDevice failed for volume "pvc-4cd76815-b8e4-4c85-ac94-5ba3783fd215" : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name zfs.csi.openebs.io not found in the list of registered CSI drivers
  Normal   AddedInterface    23m                 multus             Add eth0 [172.16.0.241/16] from ix-net


Name:             tailscale-6d686c8d6b-nqw5g
Namespace:        ix-tailscale
Priority:         0
Service Account:  tailscale
Node:             ix-truenas/192.168.1.3
Start Time:       Sun, 24 Dec 2023 12:14:00 -0500
Labels:           app=tailscale-6.0.10
                  app.kubernetes.io/instance=tailscale
                  app.kubernetes.io/managed-by=Helm
                  app.kubernetes.io/name=tailscale
                  app.kubernetes.io/version=1.56.0
                  helm-revision=25
                  helm.sh/chart=tailscale-6.0.10
                  pod-template-hash=6d686c8d6b
                  pod.name=main
                  release=tailscale
Annotations:      rollme: W08RC
Status:           Running
IP:               192.168.1.3
IPs:
  IP:           192.168.1.3
Controlled By:  ReplicaSet/tailscale-6d686c8d6b
Containers:
  tailscale:
    Container ID:  containerd://e94ef8db01abca3ff4637090130f8f5a227d97ca9c9a1f5b05e97d1e1d720d94
    Image:         tailscale/tailscale:v1.56.0@sha256:ed1f9317d0bab2bc17f6eecc29401479b91c938df48c28b1bd3d3014eba9d013
    Image ID:      docker.io/tailscale/tailscale@sha256:ed1f9317d0bab2bc17f6eecc29401479b91c938df48c28b1bd3d3014eba9d013
    Port:          <none>
    Host Port:     <none>
    Command:
      /usr/local/bin/containerboot
    State:          Running
      Started:      Sun, 24 Dec 2023 12:14:01 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     4
      memory:  8Gi
    Requests:
      cpu:     10m
      memory:  50Mi
    Environment Variables from:
      tailscale-tailscale-config  ConfigMap  Optional: false
    Environment:
      TZ:                      America/New_York
      UMASK:                   0022
      UMASK_SET:               0022
      NVIDIA_VISIBLE_DEVICES:  void
      PUID:                    568
      USER_ID:                 568
      UID:                     568
      PGID:                    568
      GROUP_ID:                568
      GID:                     568
    Mounts:
      /dev/net/tun from tun (rw)
      /dev/shm from devshm (rw)
      /shared from shared (rw)
      /tmp from tmp (rw)
      /var/logs from varlogs (rw)
      /var/run from varrun (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-nxldm (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  devshm:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
  shared:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
  tmp:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
  tun:
    Type:          HostPath (bare host directory volume)
    Path:          /dev/net/tun
    HostPathType:  
  varlogs:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
  varrun:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  8Gi
  kube-api-access-nxldm:
    Type:                     Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:   3607
    ConfigMapName:            kube-root-ca.crt
    ConfigMapOptional:        <nil>
    DownwardAPI:              true
QoS Class:                    Burstable
Node-Selectors:               kubernetes.io/arch=amd64
Tolerations:                  node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                              node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Topology Spread Constraints:  kubernetes.io/hostname:ScheduleAnyway when max skew 1 is exceeded for selector app.kubernetes.io/instance=tailscale,app.kubernetes.io/name=tailscale,pod.name=tailscale
                              truecharts.org/rack:ScheduleAnyway when max skew 1 is exceeded for selector app.kubernetes.io/instance=tailscale,app.kubernetes.io/name=tailscale,pod.name=tailscale
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  17m   default-scheduler  Successfully assigned ix-tailscale/tailscale-6d686c8d6b-nqw5g to ix-truenas
  Normal  Pulled     17m   kubelet            Container image "tailscale/tailscale:v1.56.0@sha256:ed1f9317d0bab2bc17f6eecc29401479b91c938df48c28b1bd3d3014eba9d013" already present on machine
  Normal  Created    17m   kubelet            Created container tailscale
  Normal  Started    17m   kubelet            Started container tailscale


Name:                plex-7d74cbd9b7-qn5dt
Namespace:           ix-plex
Priority:            0
Runtime Class Name:  nvidia
Service Account:     default
Node:                ix-truenas/
Start Time:          Sun, 24 Dec 2023 12:16:30 -0500
Labels:              app.kubernetes.io/instance=plex
                     app.kubernetes.io/name=plex
                     pod-template-hash=7d74cbd9b7
Annotations:         rollme: QeUzT
Status:              Failed
Reason:              UnexpectedAdmissionError
Message:             Pod was rejected: Allocate failed due to rpc error: code = Unknown desc = request for 'nvidia.com/gpu: 4' too large: maximum request size for shared resources is 1, which is unexpected
IP:                  
IPs:                 <none>
Controlled By:       ReplicaSet/plex-7d74cbd9b7
Containers:
  plex:
    Image:       plexinc/pms-docker:plexpass
    Ports:       32400/TCP, 32469/TCP, 1900/UDP, 32410/UDP, 32412/UDP, 32413/UDP, 32414/UDP
    Host Ports:  32400/TCP, 32469/TCP, 1900/UDP, 32410/UDP, 32412/UDP, 32413/UDP, 32414/UDP
    Limits:
      amd.com/gpu:         0
      gpu.intel.com/i915:  0
      nvidia.com/gpu:      4
    Requests:
      amd.com/gpu:         0
      cpu:                 10m
      gpu.intel.com/i915:  0
      memory:              50Mi
      nvidia.com/gpu:      4
    Liveness:              tcp-socket :32400 delay=10s timeout=5s period=10s #success=1 #failure=5
    Readiness:             tcp-socket :32400 delay=10s timeout=5s period=10s #success=2 #failure=5
    Startup:               tcp-socket :32400 delay=10s timeout=2s period=5s #success=1 #failure=60
    Environment:
      KUBE_NAMESPACE:              ix-plex (v1:metadata.namespace)
      POD_IP:                       (v1:status.podIP)
      NVIDIA_DRIVER_CAPABILITIES:  all
      TZ:                          America/New_York
      PLEX_CLAIM:                  claim-xqNjvLPfxzjEUg1J2AzW
      PMS_INTERNAL_ADDRESS:        http://plex:32400
      PMS_IMAGE:                   plexinc/pms-docker:1.32.8.7639-fb6452ebf
    Mounts:
      /config from config (rw)
      /config/Library/Application Support/Plex Media Server/Logs from shared-logs (rw)
      /data from data (rw)
      /shared from shared (rw)
      /transcode from transcode (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-9gqxm (ro)
      Movies from extrappvolume-0 (rw)
Volumes:
  config:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/ix-applications/releases/plex/volumes/ix_volumes/ix-plex_config
    HostPathType:  
  data:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/ix-applications/releases/plex/volumes/ix_volumes/ix-plex_data
    HostPathType:  
  shared:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
  shared-logs:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
  transcode:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/ix-applications/releases/plex/volumes/ix_volumes/ix-plex_transcode
    HostPathType:  
  extrappvolume-0:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Rusty/Spinners/Plex
    HostPathType:  
  kube-api-access-9gqxm:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   Burstable
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason                    Age   From               Message
  ----     ------                    ----  ----               -------
  Normal   Scheduled                 14m   default-scheduler  Successfully assigned ix-plex/plex-7d74cbd9b7-qn5dt to ix-truenas
  Warning  UnexpectedAdmissionError  14m   kubelet            Allocate failed due to rpc error: code = Unknown desc = request for 'nvidia.com/gpu: 4' too large: maximum request size for shared resources is 1, which is unexpected


Name:                plex-7d74cbd9b7-gz2gd
Namespace:           ix-plex
Priority:            0
Runtime Class Name:  nvidia
Service Account:     default
Node:                ix-truenas/
Start Time:          Sun, 24 Dec 2023 12:16:30 -0500
Labels:              app.kubernetes.io/instance=plex
                     app.kubernetes.io/name=plex
                     pod-template-hash=7d74cbd9b7
Annotations:         rollme: QeUzT
Status:              Failed
Reason:              UnexpectedAdmissionError
Message:             Pod was rejected: Allocate failed due to rpc error: code = Unknown desc = request for 'nvidia.com/gpu: 4' too large: maximum request size for shared resources is 1, which is unexpected
IP:                  
IPs:                 <none>
Controlled By:       ReplicaSet/plex-7d74cbd9b7
Containers:
  plex:
    Image:       plexinc/pms-docker:plexpass
    Ports:       32400/TCP, 32469/TCP, 1900/UDP, 32410/UDP, 32412/UDP, 32413/UDP, 32414/UDP
    Host Ports:  32400/TCP, 32469/TCP, 1900/UDP, 32410/UDP, 32412/UDP, 32413/UDP, 32414/UDP
    Limits:
      amd.com/gpu:         0
      gpu.intel.com/i915:  0
      nvidia.com/gpu:      4
    Requests:
      amd.com/gpu:         0
      cpu:                 10m
      gpu.intel.com/i915:  0
      memory:              50Mi
      nvidia.com/gpu:      4
    Liveness:              tcp-socket :32400 delay=10s timeout=5s period=10s #success=1 #failure=5
    Readiness:             tcp-socket :32400 delay=10s timeout=5s period=10s #success=2 #failure=5
    Startup:               tcp-socket :32400 delay=10s timeout=2s period=5s #success=1 #failure=60
    Environment:
      KUBE_NAMESPACE:              ix-plex (v1:metadata.namespace)
      POD_IP:                       (v1:status.podIP)
      NVIDIA_DRIVER_CAPABILITIES:  all
      TZ:                          America/New_York
      PLEX_CLAIM:                  claim-xqNjvLPfxzjEUg1J2AzW
      PMS_INTERNAL_ADDRESS:        http://plex:32400
      PMS_IMAGE:                   plexinc/pms-docker:1.32.8.7639-fb6452ebf
    Mounts:
      /config from config (rw)
      /config/Library/Application Support/Plex Media Server/Logs from shared-logs (rw)
      /data from data (rw)
      /shared from shared (rw)
      /transcode from transcode (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-cvxwn (ro)
      Movies from extrappvolume-0 (rw)
Volumes:
  config:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/ix-applications/releases/plex/volumes/ix_volumes/ix-plex_config
    HostPathType:  
  data:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/ix-applications/releases/plex/volumes/ix_volumes/ix-plex_data
    HostPathType:  
  shared:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
  shared-logs:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
  transcode:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/ix-applications/releases/plex/volumes/ix_volumes/ix-plex_transcode
    HostPathType:  
  extrappvolume-0:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Rusty/Spinners/Plex
    HostPathType:  
  kube-api-access-cvxwn:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   Burstable
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason                    Age   From               Message
  ----     ------                    ----  ----               -------
  Normal   Scheduled                 14m   default-scheduler  Successfully assigned ix-plex/plex-7d74cbd9b7-gz2gd to ix-truenas
  Warning  UnexpectedAdmissionError  14m   kubelet            Allocate failed due to rpc error: code = Unknown desc = request for 'nvidia.com/gpu: 4' too large: maximum request size for shared resources is 1, which is unexpected


Name:                plex-7d74cbd9b7-vfn55
Namespace:           ix-plex
Priority:            0
Runtime Class Name:  nvidia
Service Account:     default
Node:                ix-truenas/
Start Time:          Sun, 24 Dec 2023 12:16:31 -0500
Labels:              app.kubernetes.io/instance=plex
                     app.kubernetes.io/name=plex
                     pod-template-hash=7d74cbd9b7
Annotations:         rollme: QeUzT
Status:              Failed
Reason:              UnexpectedAdmissionError
Message:             Pod was rejected: Allocate failed due to rpc error: code = Unknown desc = request for 'nvidia.com/gpu: 4' too large: maximum request size for shared resources is 1, which is unexpected
IP:                  
IPs:                 <none>
Controlled By:       ReplicaSet/plex-7d74cbd9b7
Containers:
  plex:
    Image:       plexinc/pms-docker:plexpass
    Ports:       32400/TCP, 32469/TCP, 1900/UDP, 32410/UDP, 32412/UDP, 32413/UDP, 32414/UDP
    Host Ports:  32400/TCP, 32469/TCP, 1900/UDP, 32410/UDP, 32412/UDP, 32413/UDP, 32414/UDP
    Limits:
      amd.com/gpu:         0
      gpu.intel.com/i915:  0
      nvidia.com/gpu:      4
    Requests:
      amd.com/gpu:         0
      cpu:                 10m
      gpu.intel.com/i915:  0
      memory:              50Mi
      nvidia.com/gpu:      4
    Liveness:              tcp-socket :32400 delay=10s timeout=5s period=10s #success=1 #failure=5
    Readiness:             tcp-socket :32400 delay=10s timeout=5s period=10s #success=2 #failure=5
    Startup:               tcp-socket :32400 delay=10s timeout=2s period=5s #success=1 #failure=60
    Environment:
      KUBE_NAMESPACE:              ix-plex (v1:metadata.namespace)
      POD_IP:                       (v1:status.podIP)
      NVIDIA_DRIVER_CAPABILITIES:  all
      TZ:                          America/New_York
      PLEX_CLAIM:                  claim-xqNjvLPfxzjEUg1J2AzW
      PMS_INTERNAL_ADDRESS:        http://plex:32400
      PMS_IMAGE:                   plexinc/pms-docker:1.32.8.7639-fb6452ebf
    Mounts:
      /config from config (rw)
      /config/Library/Application Support/Plex Media Server/Logs from shared-logs (rw)
      /data from data (rw)
      /shared from shared (rw)
      /transcode from transcode (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-c997k (ro)
      Movies from extrappvolume-0 (rw)
Volumes:
  config:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/ix-applications/releases/plex/volumes/ix_volumes/ix-plex_config
    HostPathType:  
  data:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/ix-applications/releases/plex/volumes/ix_volumes/ix-plex_data
    HostPathType:  
  shared:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
  shared-logs:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
  transcode:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/ix-applications/releases/plex/volumes/ix_volumes/ix-plex_transcode
    HostPathType:  
  extrappvolume-0:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Rusty/Spinners/Plex
    HostPathType:  
  kube-api-access-c997k:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   Burstable
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason                    Age   From               Message
  ----     ------                    ----  ----               -------
  Normal   Scheduled                 14m   default-scheduler  Successfully assigned ix-plex/plex-7d74cbd9b7-vfn55 to ix-truenas
  Warning  UnexpectedAdmissionError  14m   kubelet            Allocate failed due to rpc error: code = Unknown desc = request for 'nvidia.com/gpu: 4' too large: maximum request size for shared resources is 1, which is unexpected


Name:                plex-7d74cbd9b7-lwrkk
Namespace:           ix-plex
Priority:            0
Runtime Class Name:  nvidia
Service Account:     default
Node:                ix-truenas/
Start Time:          Sun, 24 Dec 2023 12:16:31 -0500
Labels:              app.kubernetes.io/instance=plex
                     app.kubernetes.io/name=plex
                     pod-template-hash=7d74cbd9b7
Annotations:         rollme: QeUzT
Status:              Failed
Reason:              UnexpectedAdmissionError
Message:             Pod was rejected: Allocate failed due to rpc error: code = Unknown desc = request for 'nvidia.com/gpu: 4' too large: maximum request size for shared resources is 1, which is unexpected
IP:                  
IPs:                 <none>
Controlled By:       ReplicaSet/plex-7d74cbd9b7
Containers:
  plex:
    Image:       plexinc/pms-docker:plexpass
    Ports:       32400/TCP, 32469/TCP, 1900/UDP, 32410/UDP, 32412/UDP, 32413/UDP, 32414/UDP
    Host Ports:  32400/TCP, 32469/TCP, 1900/UDP, 32410/UDP, 32412/UDP, 32413/UDP, 32414/UDP
    Limits:
      amd.com/gpu:         0
      gpu.intel.com/i915:  0
      nvidia.com/gpu:      4
    Requests:
      amd.com/gpu:         0
      cpu:                 10m
      gpu.intel.com/i915:  0
      memory:              50Mi
      nvidia.com/gpu:      4
    Liveness:              tcp-socket :32400 delay=10s timeout=5s period=10s #success=1 #failure=5
    Readiness:             tcp-socket :32400 delay=10s timeout=5s period=10s #success=2 #failure=5
    Startup:               tcp-socket :32400 delay=10s timeout=2s period=5s #success=1 #failure=60
    Environment:
      KUBE_NAMESPACE:              ix-plex (v1:metadata.namespace)
      POD_IP:                       (v1:status.podIP)
      NVIDIA_DRIVER_CAPABILITIES:  all
      TZ:                          America/New_York
      PLEX_CLAIM:                  claim-xqNjvLPfxzjEUg1J2AzW
      PMS_INTERNAL_ADDRESS:        http://plex:32400
      PMS_IMAGE:                   plexinc/pms-docker:1.32.8.7639-fb6452ebf
    Mounts:
      /config from config (rw)
      /config/Library/Application Support/Plex Media Server/Logs from shared-logs (rw)
      /data from data (rw)
      /shared from shared (rw)
      /transcode from transcode (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-qfwnj (ro)
      Movies from extrappvolume-0 (rw)
Volumes:
  config:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/ix-applications/releases/plex/volumes/ix_volumes/ix-plex_config
    HostPathType:  
  data:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/ix-applications/releases/plex/volumes/ix_volumes/ix-plex_data
    HostPathType:  
  shared:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
  shared-logs:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
  transcode:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/ix-applications/releases/plex/volumes/ix_volumes/ix-plex_transcode
    HostPathType:  
  extrappvolume-0:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Rusty/Spinners/Plex
    HostPathType:  
  kube-api-access-qfwnj:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   Burstable
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason                    Age   From               Message
  ----     ------                    ----  ----               -------
  Normal   Scheduled                 14m   default-scheduler  Successfully assigned ix-plex/plex-7d74cbd9b7-lwrkk to ix-truenas
  Warning  UnexpectedAdmissionError  14m   kubelet            Allocate failed due to rpc error: code = Unknown desc = request for 'nvidia.com/gpu: 4' too large: maximum request size for shared resources is 1, which is unexpected


Name:                plex-7d74cbd9b7-w5h6x
Namespace:           ix-plex
Priority:            0
Runtime Class Name:  nvidia
Service Account:     default
Node:                ix-truenas/
Start Time:          Sun, 24 Dec 2023 12:16:32 -0500
Labels:              app.kubernetes.io/instance=plex
                     app.kubernetes.io/name=plex
                     pod-template-hash=7d74cbd9b7
Annotations:         rollme: QeUzT
Status:              Failed
Reason:              UnexpectedAdmissionError
Message:             Pod was rejected: Allocate failed due to rpc error: code = Unknown desc = request for 'nvidia.com/gpu: 4' too large: maximum request size for shared resources is 1, which is unexpected
IP:                  
IPs:                 <none>
Controlled By:       ReplicaSet/plex-7d74cbd9b7
Containers:
  plex:
    Image:       plexinc/pms-docker:plexpass
    Ports:       32400/TCP, 32469/TCP, 1900/UDP, 32410/UDP, 32412/UDP, 32413/UDP, 32414/UDP
    Host Ports:  32400/TCP, 32469/TCP, 1900/UDP, 32410/UDP, 32412/UDP, 32413/UDP, 32414/UDP
    Limits:
      amd.com/gpu:         0
      gpu.intel.com/i915:  0
      nvidia.com/gpu:      4
    Requests:
      amd.com/gpu:         0
      cpu:                 10m
      gpu.intel.com/i915:  0
      memory:              50Mi
      nvidia.com/gpu:      4
    Liveness:              tcp-socket :32400 delay=10s timeout=5s period=10s #success=1 #failure=5
    Readiness:             tcp-socket :32400 delay=10s timeout=5s period=10s #success=2 #failure=5
    Startup:               tcp-socket :32400 delay=10s timeout=2s period=5s #success=1 #failure=60
    Environment:
      KUBE_NAMESPACE:              ix-plex (v1:metadata.namespace)
      POD_IP:                       (v1:status.podIP)
      NVIDIA_DRIVER_CAPABILITIES:  all
      TZ:                          America/New_York
      PLEX_CLAIM:                  claim-xqNjvLPfxzjEUg1J2AzW
      PMS_INTERNAL_ADDRESS:        http://plex:32400
      PMS_IMAGE:                   plexinc/pms-docker:1.32.8.7639-fb6452ebf
    Mounts:
      /config from config (rw)
      /config/Library/Application Support/Plex Media Server/Logs from shared-logs (rw)
      /data from data (rw)
      /shared from shared (rw)
      /transcode from transcode (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-zpgtg (ro)
      Movies from extrappvolume-0 (rw)
Volumes:
  config:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/ix-applications/releases/plex/volumes/ix_volumes/ix-plex_config
    HostPathType:  
  data:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/ix-applications/releases/plex/volumes/ix_volumes/ix-plex_data
    HostPathType:  
  shared:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
  shared-logs:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
  transcode:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/ix-applications/releases/plex/volumes/ix_volumes/ix-plex_transcode
    HostPathType:  
  extrappvolume-0:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Rusty/Spinners/Plex
    HostPathType:  
  kube-api-access-zpgtg:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   Burstable
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason                    Age   From               Message
  ----     ------                    ----  ----               -------
  Normal   Scheduled                 14m   default-scheduler  Successfully assigned ix-plex/plex-7d74cbd9b7-w5h6x to ix-truenas
  Warning  UnexpectedAdmissionError  14m   kubelet            Allocate failed due to rpc error: code = Unknown desc = request for 'nvidia.com/gpu: 4' too large: maximum request size for shared resources is 1, which is unexpected


Name:                plex-68fb4bd7b4-t94zp
Namespace:           ix-plex
Priority:            0
Runtime Class Name:  nvidia
Service Account:     default
Node:                ix-truenas/192.168.1.3
Start Time:          Sun, 24 Dec 2023 12:17:33 -0500
Labels:              app.kubernetes.io/instance=plex
                     app.kubernetes.io/name=plex
                     pod-template-hash=68fb4bd7b4
Annotations:         rollme: cDV09
Status:              Running
IP:                  192.168.1.3
IPs:
  IP:           192.168.1.3
Controlled By:  ReplicaSet/plex-68fb4bd7b4
Containers:
  plex:
    Container ID:   containerd://026da5499d38d9795f14a2bd6f41492dee0976f40745c3ee0a928c027d2f4df5
    Image:          plexinc/pms-docker:plexpass
    Image ID:       docker.io/plexinc/pms-docker@sha256:8aeb4a982ea564ad309861dd251cd9e218aac3f4e4d3da21375568341be1b16f
    Ports:          32400/TCP, 32469/TCP, 1900/UDP, 32410/UDP, 32412/UDP, 32413/UDP, 32414/UDP
    Host Ports:     32400/TCP, 32469/TCP, 1900/UDP, 32410/UDP, 32412/UDP, 32413/UDP, 32414/UDP
    State:          Running
      Started:      Sun, 24 Dec 2023 12:17:36 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      amd.com/gpu:         0
      gpu.intel.com/i915:  0
      nvidia.com/gpu:      1
    Requests:
      amd.com/gpu:         0
      cpu:                 10m
      gpu.intel.com/i915:  0
      memory:              50Mi
      nvidia.com/gpu:      1
    Liveness:              tcp-socket :32400 delay=10s timeout=5s period=10s #success=1 #failure=5
    Readiness:             tcp-socket :32400 delay=10s timeout=5s period=10s #success=2 #failure=5
    Startup:               tcp-socket :32400 delay=10s timeout=2s period=5s #success=1 #failure=60
    Environment:
      KUBE_NAMESPACE:              ix-plex (v1:metadata.namespace)
      POD_IP:                       (v1:status.podIP)
      NVIDIA_DRIVER_CAPABILITIES:  all
      TZ:                          America/New_York
      PLEX_CLAIM:                  claim-xqNjvLPfxzjEUg1J2AzW
      PMS_INTERNAL_ADDRESS:        http://plex:32400
      PMS_IMAGE:                   plexinc/pms-docker:1.32.8.7639-fb6452ebf
    Mounts:
      /config from config (rw)
      /config/Library/Application Support/Plex Media Server/Logs from shared-logs (rw)
      /data from data (rw)
      /shared from shared (rw)
      /transcode from transcode (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-5dd5p (ro)
      Movies from extrappvolume-0 (rw)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  config:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/ix-applications/releases/plex/volumes/ix_volumes/ix-plex_config
    HostPathType:  
  data:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/ix-applications/releases/plex/volumes/ix_volumes/ix-plex_data
    HostPathType:  
  shared:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
  shared-logs:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
  transcode:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Storage/ix-applications/releases/plex/volumes/ix_volumes/ix-plex_transcode
    HostPathType:  
  extrappvolume-0:
    Type:          HostPath (bare host directory volume)
    Path:          /mnt/Rusty/Spinners/Plex
    HostPathType:  
  kube-api-access-5dd5p:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   Burstable
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  13m   default-scheduler  Successfully assigned ix-plex/plex-68fb4bd7b4-t94zp to ix-truenas
  Normal  Pulled     13m   kubelet            Container image "plexinc/pms-docker:plexpass" already present on machine
  Normal  Created    13m   kubelet            Created container plex
  Normal  Started    13m   kubelet            Started container plex
