<?xml version="1.0" encoding="utf-8" ?><rss version="2.0" xmlns:tt="http://teletype.in/" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:media="http://search.yahoo.com/mrss/"><channel><title>@cameda2</title><generator>teletype.in</generator><description><![CDATA[@cameda2]]></description><link>https://teletype.in/@cameda2?utm_source=teletype&amp;utm_medium=feed_rss&amp;utm_campaign=cameda2</link><atom:link rel="self" type="application/rss+xml" href="https://teletype.in/rss/cameda2?offset=0"></atom:link><atom:link rel="next" type="application/rss+xml" href="https://teletype.in/rss/cameda2?offset=10"></atom:link><atom:link rel="search" type="application/opensearchdescription+xml" title="Teletype" href="https://teletype.in/opensearch.xml"></atom:link><pubDate>Wed, 13 May 2026 23:16:24 GMT</pubDate><lastBuildDate>Wed, 13 May 2026 23:16:24 GMT</lastBuildDate><item><guid isPermaLink="true">https://teletype.in/@cameda2/pyEPnaf64L-</guid><link>https://teletype.in/@cameda2/pyEPnaf64L-?utm_source=teletype&amp;utm_medium=feed_rss&amp;utm_campaign=cameda2</link><comments>https://teletype.in/@cameda2/pyEPnaf64L-?utm_source=teletype&amp;utm_medium=feed_rss&amp;utm_campaign=cameda2#comments</comments><dc:creator>cameda2</dc:creator><title>Service examples</title><pubDate>Fri, 15 Dec 2023 11:37:50 GMT</pubDate><category>Kubernetes practice</category><description><![CDATA[Примеры использования service.]]></description><content:encoded><![CDATA[
  <p id="XAzJ">Примеры использования service.</p>
  <p id="mKDo"><strong>Pod</strong></p>
  <pre data-lang="yaml" id="322B">cat &lt;&lt;EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: cam-nginx
  namespace: default
  labels:
    app: nginx
    environment: prod
  annotations:
    author: cameda
spec:
  containers:
  - name: nginx
    image: nginx:latest
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 80
    - containerPort: 443
    resources:
      requests:
        cpu: 300m
        memory: 300Mi
      limits:
        memory: 400Mi
  restartPolicy: Always
  hostname: nginx
  subdomain: web
EOF</pre>
  <p id="nhuA"><strong>Deployment</strong></p>
  <pre data-lang="yaml" id="VvrV">cat &lt;&lt;EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cameda-nginx
  namespace: default
  labels:
    app: nginx
    environment: prod
  annotations:
    author: cameda
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  strategy: 
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        imagePullPolicy: IfNotPresent
        ports: 
        - name: http
          containerPort: 80
        resources: 
          requests: 
            cpu: 300m
            memory: 300Mi 
          limits: 
            memory: 400Mi
      restartPolicy: Always
      hostname: nginx
      subdomain: web
      dnsPolicy: ClusterFirst
      terminationGracePeriodSeconds: 90
EOF</pre>
  <hr />
  <h3 id="dgmS"><strong>Service NodePort</strong></h3>
  <pre data-lang="yaml" id="xUVA">#Минимальный вариант.
cat &lt;&lt;EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  namespace: default
  name: nginx-service1
  labels:
    environment: prod
  annotations:
    author: cameda
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
  - name: http
    protocol: TCP
    port: 80
EOF</pre>
  <pre data-lang="yaml" id="OLcS">#Указываем targetPort/nodePort.
cat &lt;&lt;EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  namespace: default
  name: nginx-service2
  labels:
    environment: prod
  annotations:
    author: cameda
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
  - name: http
    protocol: TCP
    port: 80
    targetPort: 80
    nodePort: 30010
EOF</pre>
  <pre id="OLcS" data-lang="yaml">#Открываем два порта.
cat &lt;&lt;EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  namespace: default
  name: nginx-service3
  labels:
    environment: prod
  annotations:
    author: cameda
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
  - name: http
    protocol: TCP
    port: 80
  - name: https
    protocol: TCP
    port: 443
EOF</pre>
  <pre data-lang="yaml" id="Cjd7">#На подах видны адреса источников обращения. Также идёт привязка сессий.
cat &lt;&lt;EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  namespace: default
  name: nginx-service4
  labels:
    environment: prod
  annotations:
    author: cameda
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
  - name: http
    protocol: TCP
    port: 80
    targetPort: 80
  externalTrafficPolicy: Local
  sessionAffinity: ClientIP
EOF</pre>
  <hr />
  <h3 id="mW5c"><strong>Service ClusterIP</strong></h3>
  <pre data-lang="yaml" id="Ys7X">#Минимальный вариант.
cat &lt;&lt;EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  namespace: default
  name: nginx-service5
  labels:
    environment: prod
  annotations:
    author: cameda
spec:
  type: ClusterIP
  selector:
    app: nginx
  ports:
  - name: http
    protocol: TCP
    port: 80
EOF</pre>
  <pre id="Ys7X" data-lang="yaml">#Указываем targetPort.
cat &lt;&lt;EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  namespace: default
  name: nginx-service6
  labels:
    environment: prod
  annotations:
    author: cameda
spec:
  type: ClusterIP
  selector:
    app: nginx
  ports:
  - name: http
    protocol: TCP
    port: 80
    targetPort: 80
EOF</pre>
  <pre data-lang="yaml" id="fCmS">#Открываем два порта.
cat &lt;&lt;EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  namespace: default
  name: nginx-service7
  labels:
    environment: prod
  annotations:
    author: cameda
spec:
  type: ClusterIP
  selector:
    app: nginx
  ports:
  - name: http
    protocol: TCP
    port: 80
  - name: https
    protocol: TCP
    port: 443
EOF</pre>
  <hr />
  <h3 id="tKX8"><strong>Service LoadBalancer</strong></h3>
  <pre data-lang="yaml" id="GqfJ">#Простой сервис типа балансер.
cat &lt;&lt;EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  namespace: default
  name: nginx-service8
  labels:
    environment: prod
  annotations:
    author: cameda
spec:
  type: LoadBalancer
  selector:
    app: nginx
  ports:
  - name: http
    protocol: TCP
    port: 80
    targetPort: 80
EOF</pre>
  <pre data-lang="yaml" id="W89l">#Внутренний балансировщик
cat &lt;&lt;EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  namespace: default
  name: nginx-service9
  annotations:
    # Тип балансировщика: внутренний.
    yandex.cloud/load-balancer-type: internal
    author: cameda
  labels:
    environment: prod
spec:
  type: LoadBalancer
  selector:
    app: nginx
  ports:
  - name: http
    protocol: TCP
    port: 80
    targetPort: 80
EOF</pre>
  <pre data-lang="yaml" id="Wgnx">#Балансировщик с зарезервированным адресом, проброской адреса источника в под.
cat &lt;&lt;EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  namespace: default
  name: nginx-service10
  annotations:
    yandex.cloud/subnet-id: &lt;subnet-id&gt;
    author: cameda
  labels:
    environment: prod
spec:
  type: LoadBalancer
  selector:
    app: nginx
  ports:
  - name: http
    protocol: TCP
    port: 80
    targetPort: 80
  externalTrafficPolicy: Local
  loadBalancerIP: &lt;заранее зарезервированный IP-адрес&gt;
EOF</pre>
  <pre id="Wgnx" data-lang="yaml">#Балансировщик с зарезервированным адресом, проброской адреса источника в под и привязкой сессий
cat &lt;&lt;EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  namespace: default
  name: nginx-service11
  annotations:
    yandex.cloud/subnet-id: &lt;subnet-id&gt;
    author: cameda
  labels:
    environment: prod
spec:
  type: LoadBalancer
  selector:
    app: nginx
  ports:
  - name: http
    protocol: TCP
    port: 80
    targetPort: 80
  - name: https
    protocol: TCP
    port: 443
    targetPort: 443
  externalTrafficPolicy: Local
  loadBalancerIP: &lt;заранее зарезервированный IP-адрес&gt;
  sessionAffinity: ClientIP
EOF</pre>

]]></content:encoded></item><item><guid isPermaLink="true">https://teletype.in/@cameda2/ESLk1x8NSD4</guid><link>https://teletype.in/@cameda2/ESLk1x8NSD4?utm_source=teletype&amp;utm_medium=feed_rss&amp;utm_campaign=cameda2</link><comments>https://teletype.in/@cameda2/ESLk1x8NSD4?utm_source=teletype&amp;utm_medium=feed_rss&amp;utm_campaign=cameda2#comments</comments><dc:creator>cameda2</dc:creator><title>Orphaned pod pod_id found, but error not a directory occurred when trying to remove the volumes dir</title><pubDate>Sat, 12 Aug 2023 18:50:15 GMT</pubDate><category>kubernetes errors</category><description><![CDATA[Воспроизводится на версиях Kubernetes до 1.24.]]></description><content:encoded><![CDATA[
  <p id="52vG">Воспроизводится на версиях Kubernetes до 1.24.</p>
  <p id="vZQd">Это баг на стороне стораджа. Стреляет когда были проблемы с нодой, например, в результате нештатной перезагрузки.</p>
  <p id="dADB">Помогает удаление связанной с подом директории из /var/lib/kubelet/pods</p>
  <p id="JOCq">Для примера можно использовать решение отсюда: <br /><a href="https://github.com/kubernetes/kubernetes/issues/105536?ysclid=ll6te4b7x91590084#issuecomment-1612347166" target="_blank">https://github.com/kubernetes/kubernetes/issues/105536?ysclid=ll6te4b7x91590084#issuecomment-1612347166</a></p>
  <pre id="5bc3" data-lang="bash">#!/bin/bash
while true
do
        tail /var/log/k8s-service.log | grep &quot;orphaned pod&quot; | awk &#x27;{print $18}&#x27; | cut -d\\ -f2 | cut -d\&quot; -f2 | uniq | xargs -I % sh -c &#x27;echo &quot;deleting /var/lib/kubelet/pods/%&quot;; rm -rf /var/lib/kubelet/pods/%;&#x27;
        sleep 1
done</pre>

]]></content:encoded></item><item><guid isPermaLink="true">https://teletype.in/@cameda2/wM9Lhx_kxTM</guid><link>https://teletype.in/@cameda2/wM9Lhx_kxTM?utm_source=teletype&amp;utm_medium=feed_rss&amp;utm_campaign=cameda2</link><comments>https://teletype.in/@cameda2/wM9Lhx_kxTM?utm_source=teletype&amp;utm_medium=feed_rss&amp;utm_campaign=cameda2#comments</comments><dc:creator>cameda2</dc:creator><title>Fairwinds. Polaris</title><pubDate>Tue, 11 Jul 2023 05:08:07 GMT</pubDate><category>Kubernetes utils</category><description><![CDATA[<img src="https://img4.teletype.in/files/75/a0/75a01597-12a1-4a17-910c-904804024aa5.png"></img>Данное ПО создано для анализа имеющихся ресурсов в кластере на предмет уязвимостей.]]></description><content:encoded><![CDATA[
  <p id="pvvu">Данное ПО создано для анализа имеющихся ресурсов в кластере на предмет уязвимостей.</p>
  <p id="xqXT"><strong>Установка.</strong></p>
  <pre id="Wz1U" data-lang="bash">helm repo add fairwinds-stable https://charts.fairwinds.com/stable
helm upgrade --install polaris fairwinds-stable/polaris --namespace polaris --create-namespace

# Быстрый вариант подключения через port-forward
kubectl port-forward --namespace polaris svc/polaris-dashboard 8080:80</pre>
  <p id="8G8a"><strong>Service.</strong></p>
  <pre id="SFtf" data-lang="yaml">kubectl describe svc polaris-dashboard -n polaris
Name:              polaris-dashboard
Namespace:         polaris
Labels:            app=polaris
                   app.kubernetes.io/component=dashboard
                   app.kubernetes.io/instance=polaris
                   app.kubernetes.io/managed-by=Helm
                   app.kubernetes.io/name=polaris
                   app.kubernetes.io/part-of=polaris
                   app.kubernetes.io/version=5.10.3
                   helm.sh/chart=polaris-5.10.3
Annotations:       meta.helm.sh/release-name: polaris
                   meta.helm.sh/release-namespace: polaris
Selector:          app.kubernetes.io/instance=polaris,app.kubernetes.io/name=polaris,app=polaris,component=dashboard
Type:              ClusterIP
IP Family Policy:  SingleStack
IP Families:       IPv4
IP:                10.21.28.186
IPs:               10.21.28.186
Port:              http-dashboard  80/TCP
TargetPort:        8080/TCP
Endpoints:         10.20.6.233:8080,10.20.7.86:8080
Session Affinity:  None
Events:            &lt;none&gt;</pre>
  <p id="csN2"><strong>Ingress.</strong></p>
  <pre data-lang="yaml" id="MDPf">cat &lt;&lt;EOF | kubectl apply -f -
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: polaris
  namespace: polaris
  annotations:
    kubernetes.io/ingress.class: &quot;nginx&quot;
spec:
  rules:
    - host: polaris.prod.cameda1.tk
      http:
        paths:
        - path: /
          pathType: Prefix
          backend:
            service:
              name: polaris-dashboard
              port:
                number: 8080
EOF</pre>
  <p id="Xt9F">Скрин приложения.</p>
  <figure id="oTMr" class="m_column">
    <img src="https://img4.teletype.in/files/75/a0/75a01597-12a1-4a17-910c-904804024aa5.png" width="2875" />
    <figcaption>Скриншот Polaris </figcaption>
  </figure>
  <h3 id="YFng">Полезные ссылки.</h3>
  <p id="lpW9">Установка в режиме дашборда: <a href="https://polaris.docs.fairwinds.com/dashboard/" target="_blank">https://polaris.docs.fairwinds.com/dashboard/</a></p>
  <p id="2PCp">Кратко про Polaris: <a href="https://github.com/FairwindsOps/polaris/tree/master" target="_blank">https://github.com/FairwindsOps/polaris/tree/master</a></p>

]]></content:encoded></item><item><guid isPermaLink="true">https://teletype.in/@cameda2/jHh2t2J4E1T</guid><link>https://teletype.in/@cameda2/jHh2t2J4E1T?utm_source=teletype&amp;utm_medium=feed_rss&amp;utm_campaign=cameda2</link><comments>https://teletype.in/@cameda2/jHh2t2J4E1T?utm_source=teletype&amp;utm_medium=feed_rss&amp;utm_campaign=cameda2#comments</comments><dc:creator>cameda2</dc:creator><title>Download Kubernetes</title><pubDate>Thu, 06 Jul 2023 15:22:23 GMT</pubDate><category>Kubernetes utils</category><description><![CDATA[Скачать k8s последней версии можно использовав команду.]]></description><content:encoded><![CDATA[
  <p id="8TeX">Скачать k8s последней версии можно использовав команду.</p>
  <pre id="LF0M" data-lang="bash">wget -q -O - https://get.k8s.io | bash
cd kubernetes</pre>
  <p id="XowX">Компоненты кластера в скомпилированном виде лежат в директории server.<br /></p>

]]></content:encoded></item><item><guid isPermaLink="true">https://teletype.in/@cameda2/ZRzMOvgAlyl</guid><link>https://teletype.in/@cameda2/ZRzMOvgAlyl?utm_source=teletype&amp;utm_medium=feed_rss&amp;utm_campaign=cameda2</link><comments>https://teletype.in/@cameda2/ZRzMOvgAlyl?utm_source=teletype&amp;utm_medium=feed_rss&amp;utm_campaign=cameda2#comments</comments><dc:creator>cameda2</dc:creator><title>kubectl ktop. top для k8s</title><pubDate>Thu, 06 Jul 2023 10:18:54 GMT</pubDate><category>Kubernetes utils</category><description><![CDATA[<img src="https://img1.teletype.in/files/0d/a8/0da8e56d-d5ec-4ceb-9381-e6e5b4fb50b2.png"></img>Данный плагин предназначен для мониторинга нагрузки на нодах кластера.]]></description><content:encoded><![CDATA[
  <p id="3oSb">Данный плагин предназначен для мониторинга нагрузки на нодах кластера.</p>
  <p id="BTqL"><strong>Установка krew.</strong></p>
  <pre data-lang="bash" id="qIEi">(
  set -x; cd &quot;$(mktemp -d)&quot; &amp;&amp;
  OS=&quot;$(uname | tr &#x27;[:upper:]&#x27; &#x27;[:lower:]&#x27;)&quot; &amp;&amp;
  ARCH=&quot;$(uname -m | sed -e &#x27;s/x86_64/amd64/&#x27; -e &#x27;s/\(arm\)\(64\)\?.*/\1\2/&#x27; -e &#x27;s/aarch64$/arm64/&#x27;)&quot; &amp;&amp;
  KREW=&quot;krew-${OS}_${ARCH}&quot; &amp;&amp;
  curl -fsSLO &quot;https://github.com/kubernetes-sigs/krew/releases/latest/download/${KREW}.tar.gz&quot; &amp;&amp;
  tar zxvf &quot;${KREW}.tar.gz&quot; &amp;&amp;
  ./&quot;${KREW}&quot; install krew
)</pre>
  <pre data-lang="bash" id="LMkt">export PATH=&quot;${KREW_ROOT:-$HOME/.krew}/bin:$PATH&quot;</pre>
  <p id="na8r"><strong>Установка ktop.</strong></p>
  <pre data-lang="bash" id="kZKv">kubectl krew install ktop</pre>
  <p id="jtO0"><strong>Примеры использования.</strong></p>
  <pre data-lang="bash" id="ZD3m">kubectl ktop</pre>
  <p id="gaEQ">Перемещение между блоками происходит с помощью Tab. Между элементами - с помощью стрелок.</p>
  <p id="uX18"><strong>Скриншот программы.</strong></p>
  <figure id="oU8N" class="m_column">
    <img src="https://img1.teletype.in/files/0d/a8/0da8e56d-d5ec-4ceb-9381-e6e5b4fb50b2.png" width="2867" />
  </figure>
  <h3 id="dTyD">Полезные ссылки.</h3>
  <p id="p7cQ">GitHUB проекта: <a href="https://github.com/vladimirvivien/ktop" target="_blank">https://github.com/vladimirvivien/ktop</a></p>
  <p id="1qJ9">Установка krew: <a href="https://krew.sigs.k8s.io/docs/user-guide/setup/install/" target="_blank">https://krew.sigs.k8s.io/docs/user-guide/setup/install/</a></p>
  <p id="xmgx">Список плагинов krew: <a href="https://krew.sigs.k8s.io/plugins/" target="_blank">https://krew.sigs.k8s.io/plugins/</a></p>

]]></content:encoded></item><item><guid isPermaLink="true">https://teletype.in/@cameda2/FcSI4VDpZ4Y</guid><link>https://teletype.in/@cameda2/FcSI4VDpZ4Y?utm_source=teletype&amp;utm_medium=feed_rss&amp;utm_campaign=cameda2</link><comments>https://teletype.in/@cameda2/FcSI4VDpZ4Y?utm_source=teletype&amp;utm_medium=feed_rss&amp;utm_campaign=cameda2#comments</comments><dc:creator>cameda2</dc:creator><title>Kyverno. failed to call webhook: service kyverno-svc not found</title><pubDate>Wed, 05 Jul 2023 07:52:31 GMT</pubDate><category>MinIO+Vault+Kaniko+KrakenD+Istio</category><description><![CDATA[Данная ошибка у меня появилась при попытке удаления namespace kuverno. В ns был под, созданный deployment, который не удалялся.]]></description><content:encoded><![CDATA[
  <p id="mRUu">Данная ошибка у меня появилась при попытке удаления namespace kuverno. В ns был под, созданный deployment, который не удалялся.</p>
  <p id="y8m2">Поймал данную ошибку при попытке удалить namespace со всеми компонентами kyverno. Остался под, созданный с помощью deployment, который не удалялся. При этом самого deployment уже не было. Удаление finalizers в namespace тоже не помогло.</p>
  <p id="CFK4">Помогло удалить ns удаление нескольких объектов.</p>
  <pre id="O1J5" data-lang="bash">kubectl delete validatingwebhookconfiguration kyverno-resource-validating-webhook-cfg
kubectl delete mutatingwebhookconfiguration kyverno-resource-mutating-webhook-cfg</pre>
  <p id="Xxeo">Данное решение подсмотрел на сайте kyverno.io</p>
  <h3 id="DNVl">Полезные ссылки.</h3>
  <p id="YwX8">Troubleshooting Kyverno: <a href="https://kyverno.io/docs/troubleshooting/" target="_blank">https://kyverno.io/docs/troubleshooting/</a></p>

]]></content:encoded></item><item><guid isPermaLink="true">https://teletype.in/@cameda2/zTZ5o7kXxoC</guid><link>https://teletype.in/@cameda2/zTZ5o7kXxoC?utm_source=teletype&amp;utm_medium=feed_rss&amp;utm_campaign=cameda2</link><comments>https://teletype.in/@cameda2/zTZ5o7kXxoC?utm_source=teletype&amp;utm_medium=feed_rss&amp;utm_campaign=cameda2#comments</comments><dc:creator>cameda2</dc:creator><title>Kyverno. Policy label required.</title><pubDate>Mon, 03 Jul 2023 17:58:29 GMT</pubDate><category>MinIO+Vault+Kaniko+KrakenD+Istio</category><description><![CDATA[Пример политики и работы с Kyverno.]]></description><content:encoded><![CDATA[
  <p id="7DZx">Пример политики и работы с Kyverno.</p>
  <p id="L873"><strong>Обязательная установка labels на поды.</strong></p>
  <pre id="Hc8x" data-lang="yaml">cat &lt;&lt;EOF | kubectl apply -f -
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: require-labels
spec:
  validationFailureAction: enforce
  rules:
  - name: check-for-labels
    match:
      any:
      - resources:
          kinds:
          - Pod
    validate:
      message: &quot;label &#x27;app.kubernetes.io/name&#x27; is required&quot;
      pattern:
        metadata:
          labels:
            app.kubernetes.io/name: &quot;?*&quot;
EOF</pre>
  <p id="6gdC">Ошибка, вылетающая если создать под без нужного label.</p>
  <pre id="6gdC" data-lang="bash">Error from server: error when creating &quot;STDIN&quot;: admission webhook &quot;validate.kyverno.svc-fail&quot; denied the request:

resource Pod/test/cam-nginx was blocked due to the following policies

require-labels:
  check-for-labels: &#x27;validation error: label &#x27;&#x27;app.kubernetes.io/name&#x27;&#x27; is required.
    Rule check-for-labels failed at path /metadata/labels/app.kubernetes.io/name/&#x27;</pre>
  <p id="aniQ"><strong>Посмотреть политики Kyverno.</strong></p>
  <pre id="RRJx" data-lang="bash">kubectl get clusterpolicy.kyverno.io

kubectl get clusterpolicy.kyverno.io | grep &quot;enforce&quot;
require-labels                   true         enforce   true</pre>
  <pre id="VLFk" data-lang="bash">kubectl get clusterpolicy.kyverno.io -owide
NAME                             BACKGROUND   ACTION    FAILURE POLICY   READY
disallow-capabilities            true         audit     Fail             true
disallow-host-namespaces         true         audit     Fail             true
disallow-host-path               true         audit     Fail             true
disallow-host-ports              true         audit     Fail             true
disallow-host-process            true         audit     Fail             true
disallow-privileged-containers   true         audit     Fail             true
disallow-proc-mount              true         audit     Fail             true
disallow-selinux                 true         audit     Fail             true
require-labels                   true         enforce   Fail             true
restrict-apparmor-profiles       true         audit     Fail             true
restrict-seccomp                 true         audit     Fail             true
restrict-sysctls                 true         audit     Fail             true</pre>
  <p id="rLRo"><strong>Дескрайб сзданной политики.</strong></p>
  <pre id="rLRo" data-lang="bash">kubectl describe clusterpolicy.kyverno.io require-labels
Name:         require-labels
Namespace:
Labels:       &lt;none&gt;
Annotations:  pod-policies.kyverno.io/autogen-controllers: DaemonSet,Deployment,Job,StatefulSet,CronJob
API Version:  kyverno.io/v1
Kind:         ClusterPolicy
Metadata:
  Creation Timestamp:  2023-07-03T17:08:33Z
  Generation:          2
  Managed Fields:
    API Version:  kyverno.io/v1
    Fields Type:  FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          .:
          f:kubectl.kubernetes.io/last-applied-configuration:
      f:spec:
        .:
        f:validationFailureAction:
    Manager:      kubectl-client-side-apply
    Operation:    Update
    Time:         2023-07-03T17:08:33Z
    API Version:  kyverno.io/v1
    Fields Type:  FieldsV1
    fieldsV1:
      f:spec:
        f:rules:
    Manager:      kyverno
    Operation:    Update
    Time:         2023-07-03T17:08:33Z
    API Version:  kyverno.io/v1
    Fields Type:  FieldsV1
    fieldsV1:
      f:status:
        .:
        f:ready:
    Manager:         kyverno
    Operation:       Update
    Subresource:     status
    Time:            2023-07-03T17:08:39Z
  Resource Version:  16764995
  UID:               3e5e1c5c-531f-421c-afc1-e93697305131
Spec:
  Background:      true
  Failure Policy:  Fail
  Rules:
    Exclude:
      Resources:
    Generate:
      Clone:
    Match:
      Any:
        Resources:
          Kinds:
            Pod
      Resources:
    Mutate:
    Name:  check-for-labels
    Validate:
      Message:  label &#x27;app.kubernetes.io/name&#x27; is required
      Pattern:
        Metadata:
          Labels:
            app.kubernetes.io/name:  ?*
    Exclude:
      Resources:
    Generate:
      Clone:
    Match:
      Any:
        Resources:
          Kinds:
            DaemonSet
            Deployment
            Job
            StatefulSet
      Resources:
    Mutate:
    Name:  autogen-check-for-labels
    Validate:
      Message:  label &#x27;app.kubernetes.io/name&#x27; is required
      Pattern:
        Spec:
          Template:
            Metadata:
              Labels:
                app.kubernetes.io/name:  ?*
    Exclude:
      Resources:
    Generate:
      Clone:
    Match:
      Any:
        Resources:
          Kinds:
            CronJob
      Resources:
    Mutate:
    Name:  autogen-cronjob-check-for-labels
    Validate:
      Message:  label &#x27;app.kubernetes.io/name&#x27; is required
      Pattern:
        Spec:
          Job Template:
            Spec:
              Template:
                Metadata:
                  Labels:
                    app.kubernetes.io/name:  ?*
  Validation Failure Action:                 enforce
Status:
  Ready:  true</pre>
  <p id="6Pv7"><strong>Пример пода с правильными label</strong></p>
  <pre data-lang="yaml" id="IeE0">cat &lt;&lt;EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: cam-nginx
  namespace: default
  labels:
    app: nginx
    environment: prod
  annotations:
    author: cameda
spec:
  containers:
  - name: nginx
    image: nginx:latest
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 80
    - containerPort: 443
    resources:
      requests:
        cpu: 300m
        memory: 300Mi
      limits:
        memory: 400Mi
  restartPolicy: Always
  hostname: nginx
  subdomain: web
EOF</pre>

]]></content:encoded></item><item><guid isPermaLink="true">https://teletype.in/@cameda2/fO8qe_CIZ1n</guid><link>https://teletype.in/@cameda2/fO8qe_CIZ1n?utm_source=teletype&amp;utm_medium=feed_rss&amp;utm_campaign=cameda2</link><comments>https://teletype.in/@cameda2/fO8qe_CIZ1n?utm_source=teletype&amp;utm_medium=feed_rss&amp;utm_campaign=cameda2#comments</comments><dc:creator>cameda2</dc:creator><title>kubectl deprecations. Поиск объектов с устаревшей версией  API. (krew)</title><pubDate>Mon, 26 Jun 2023 17:33:34 GMT</pubDate><category>Kubernetes utils</category><description><![CDATA[Данный плагин предназначен для поиска, в текущем кластере k8s, объектов с устаревшей версией API.]]></description><content:encoded><![CDATA[
  <p id="3oSb">Данный плагин предназначен для поиска, в текущем кластере k8s, объектов с устаревшей версией API.</p>
  <p id="B8xv"><strong>Установка krew.</strong></p>
  <pre data-lang="bash" id="EdZv">(
  set -x; cd &quot;$(mktemp -d)&quot; &amp;&amp;
  OS=&quot;$(uname | tr &#x27;[:upper:]&#x27; &#x27;[:lower:]&#x27;)&quot; &amp;&amp;
  ARCH=&quot;$(uname -m | sed -e &#x27;s/x86_64/amd64/&#x27; -e &#x27;s/\(arm\)\(64\)\?.*/\1\2/&#x27; -e &#x27;s/aarch64$/arm64/&#x27;)&quot; &amp;&amp;
  KREW=&quot;krew-${OS}_${ARCH}&quot; &amp;&amp;
  curl -fsSLO &quot;https://github.com/kubernetes-sigs/krew/releases/latest/download/${KREW}.tar.gz&quot; &amp;&amp;
  tar zxvf &quot;${KREW}.tar.gz&quot; &amp;&amp;
  ./&quot;${KREW}&quot; install krew
)</pre>
  <pre data-lang="bash" id="AjdV">export PATH=&quot;${KREW_ROOT:-$HOME/.krew}/bin:$PATH&quot;</pre>
  <p id="na8r"><strong>Установка kubepug.</strong></p>
  <pre id="qayp" data-lang="bash">kubectl krew install deprecations</pre>
  <p id="jtO0"><strong>Примеры использования.</strong></p>
  <pre id="0oP3" data-lang="bash">kubectl deprecations --k8s-version=v1.24.8
helm template -f values.yaml . | kubectl deprecations --k8s-version v1.22.0 --input-file=-</pre>
  <p id="gaEQ">Работает только если есть права админа на кластер.</p>
  <h3 id="dTyD">Полезные ссылки.</h3>
  <p id="p7cQ">GitHUB проекта: <a href="https://github.com/rikatz/kubepug" target="_blank">https://github.com/rikatz/kubepug</a></p>
  <p id="1qJ9">Установка krew: <a href="https://krew.sigs.k8s.io/docs/user-guide/setup/install/" target="_blank">https://krew.sigs.k8s.io/docs/user-guide/setup/install/</a></p>
  <p id="xmgx">Список плагинов krew: <a href="https://krew.sigs.k8s.io/plugins/" target="_blank">https://krew.sigs.k8s.io/plugins/</a></p>

]]></content:encoded></item><item><guid isPermaLink="true">https://teletype.in/@cameda2/NUjgN3X80-v</guid><link>https://teletype.in/@cameda2/NUjgN3X80-v?utm_source=teletype&amp;utm_medium=feed_rss&amp;utm_campaign=cameda2</link><comments>https://teletype.in/@cameda2/NUjgN3X80-v?utm_source=teletype&amp;utm_medium=feed_rss&amp;utm_campaign=cameda2#comments</comments><dc:creator>cameda2</dc:creator><title>Jessie pod for DNS diag</title><pubDate>Mon, 26 Jun 2023 16:14:06 GMT</pubDate><category>Kubernetes pod/deploy/sts/ds example</category><description><![CDATA[Под предназначен для диагностики DNS из кластера k8s.]]></description><content:encoded><![CDATA[
  <p id="2U53">Под предназначен для диагностики DNS из кластера k8s.</p>
  <p id="OMSi"><strong>Установка.</strong></p>
  <pre id="ZzMI" data-lang="bash">kubectl run jessie-dnsutils --image=k8s.gcr.io/jessie-dnsutils --restart=Never --command sleep infinity</pre>
  <p id="fs3O"><strong>Примеры использования.</strong></p>
  <pre id="fs3O" data-lang="bash">kubectl exec --tty --stdin jessie-dnsutils -- dig ya.ru</pre>
  <pre id="fs3O" data-lang="bash">kubectl exec --tty --stdin jessie-dnsutils -- dig NS ya.ru +short
ns2.yandex.ru.
ns1.yandex.ru.

kubectl exec --tty --stdin jessie-dnsutils -- ping ya.ru
PING ya.ru (5.255.255.242): 56 data bytes
64 bytes from 5.255.255.242: icmp_seq=0 ttl=56 time=3.971 ms</pre>

]]></content:encoded></item><item><guid isPermaLink="true">https://teletype.in/@cameda2/-7kpm1oK4f5</guid><link>https://teletype.in/@cameda2/-7kpm1oK4f5?utm_source=teletype&amp;utm_medium=feed_rss&amp;utm_campaign=cameda2</link><comments>https://teletype.in/@cameda2/-7kpm1oK4f5?utm_source=teletype&amp;utm_medium=feed_rss&amp;utm_campaign=cameda2#comments</comments><dc:creator>cameda2</dc:creator><title>kubectl count (krew)</title><pubDate>Mon, 26 Jun 2023 16:03:42 GMT</pubDate><category>Kubernetes utils</category><description><![CDATA[Данный плагин предназначен для подсчёта количества ресурсов в Namespace по kind.]]></description><content:encoded><![CDATA[
  <p id="AWCg">Данный плагин предназначен для подсчёта количества ресурсов в Namespace по kind.</p>
  <p id="B8xv"><strong>Установка krew.</strong></p>
  <pre data-lang="bash" id="VwrH">(
  set -x; cd &quot;$(mktemp -d)&quot; &amp;&amp;
  OS=&quot;$(uname | tr &#x27;[:upper:]&#x27; &#x27;[:lower:]&#x27;)&quot; &amp;&amp;
  ARCH=&quot;$(uname -m | sed -e &#x27;s/x86_64/amd64/&#x27; -e &#x27;s/\(arm\)\(64\)\?.*/\1\2/&#x27; -e &#x27;s/aarch64$/arm64/&#x27;)&quot; &amp;&amp;
  KREW=&quot;krew-${OS}_${ARCH}&quot; &amp;&amp;
  curl -fsSLO &quot;https://github.com/kubernetes-sigs/krew/releases/latest/download/${KREW}.tar.gz&quot; &amp;&amp;
  tar zxvf &quot;${KREW}.tar.gz&quot; &amp;&amp;
  ./&quot;${KREW}&quot; install krew
)</pre>
  <pre data-lang="bash" id="nhSs">export PATH=&quot;${KREW_ROOT:-$HOME/.krew}/bin:$PATH&quot;</pre>
  <p id="na8r"><strong>Установка count.</strong></p>
  <pre id="R9ns">kubectl krew install count</pre>
  <p id="jtO0"><strong>Примеры использования.</strong></p>
  <pre id="q3Ee" data-lang="bash">kubectl count pods,ds,deploy
kubectl count -n kube-system po,deploy

#Вывод в yaml формате.
kubectl count -oy -n kube-system deploy,svc</pre>
  <p id="wI6c"><strong>Пример вывода.</strong></p>
  <pre id="FpB5" data-lang="bash">kubectl count -oy -n kube-system deploy,svc
- namespace: kube-system
  groupVersion: apps/v1
  kind: Deployment
  count: 3
- namespace: kube-system
  groupVersion: v1
  kind: Service
  count: 10</pre>
  <pre id="rede" data-lang="bash">kubectl count pods -n kube-system
+-------------+------------------------+------------+-------+
|  Namespace  |      GroupVersion      |    Kind    | Count |
+-------------+------------------------+------------+-------+
| kube-system | v1                     | Pod        |    46 |
+             +------------------------+------------+       +
|             | metrics.k8s.io/v1beta1 | PodMetrics |       |
+-------------+------------------------+------------+-------+</pre>
  <h3 id="dTyD">Полезные ссылки.</h3>
  <p id="p7cQ">GitHUB проекта: <a href="https://github.com/chenjiandongx/kubectl-count" target="_blank">https://github.com/chenjiandongx/kubectl-count</a></p>
  <p id="1qJ9">Установка krew: <a href="https://krew.sigs.k8s.io/docs/user-guide/setup/install/" target="_blank">https://krew.sigs.k8s.io/docs/user-guide/setup/install/</a></p>
  <p id="xmgx">Список плагинов krew: <a href="https://krew.sigs.k8s.io/plugins/" target="_blank">https://krew.sigs.k8s.io/plugins/</a></p>

]]></content:encoded></item></channel></rss>