Helm/Pi-Hole
|
Helm |
|---|
helm repo add mojo2600 https://mojo2600.github.io/pihole-kubernetes
helm repo update && helm repo list
kubectl config get-contexts
|
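Optional sanity check (not part of the original sequence): confirm the repo is reachable and list the published chart versions before pinning one.
helm search repo mojo2600/pihole --versions | head -n 5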
Helm » Context
|
Helm » Context | |
|---|---|
export KUBECONFIG="${HOME}/.kube/aws-kubeconfig.yaml"
export KUBECONFIG="${HOME}/.kube/dev-kubeconfig.yaml"
export KUBECONFIG="${HOME}/.kube/gcp-kubeconfig.yaml"
export KUBECONFIG="${HOME}/.kube/config"
|
cat <<'EXE'| sudo bash
mkdir -p /var/minikube/pvc/pihole/data-pihole-0/
chown -R 0:0 /var/minikube/pvc/pihole/
EXE
|
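Optional check, assuming the directory was created on this same host: confirm the backing path exists and is owned by root as intended.
sudo ls -ld /var/minikube/pvc/pihole/data-pihole-0/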
Helm » Install
|
Helm » Install | |
|---|---|
helm show values mojo2600/pihole --version=2.31.0|less
helm show values mojo2600/pihole --version=2.34.0|less
| |
kubectl get ns|grep pihole
kubectl delete ns pihole || true
kubectl get ns|grep pihole
kubectl create ns pihole || true
|
cat <<ENV | \
kubectl -n=pihole create secret generic pihole --from-env-file=/dev/stdin
password=1EY3QexQoNg3nKMS
ENV
|
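Optional check: confirm the secret exists and carries a password key without printing its value (describe shows key names and byte counts only).
kubectl -n pihole describe secret pihole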
|
| |
cat <<'YML'| \
kubectl apply -f -
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pihole-data-pihole-0
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: hostpath
  hostPath:
    path: /var/hostpath_pv/pihole/data-pihole-0
    type: DirectoryOrCreate
YML
|
cat << YML | \
kubectl apply -f -
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  labels:
    app.kubernetes.io/name: pihole
  name: data-pihole-0
  namespace: pihole
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: hostpath
  volumeName: pihole-data-pihole-0
YML
|
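Optional check before installing the chart: the pre-created volume and claim defined above should both report a Bound status.
kubectl get pv pihole-data-pihole-0
kubectl -n pihole get pvc data-pihole-0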
cat <<'YML' | \
helm -n=pihole upgrade -i pihole mojo2600/pihole --version=2.34.0 -f -
---
serviceWeb:
  type: LoadBalancer
  loadBalancerIP: 192.168.49.110
  annotations:
    metallb.universe.tf/allow-shared-ip: pihole-svc
    metallb.io/ip-allocated-from-pool: metallb-ip-pool
serviceDns:
  type: LoadBalancer
  loadBalancerIP: 192.168.49.110
  annotations:
    metallb.universe.tf/allow-shared-ip: pihole-svc
    metallb.io/ip-allocated-from-pool: metallb-ip-pool
DNS1: 1.1.1.1
DNS2: 8.8.8.8
dnsmasq:
  upstreamServers: []
  # - 1.1.1.1
  # - 8.8.8.8
  customDnsEntries:
    - address=/biz.ops/192.168.49.2
    - address=/k8s.ops/192.168.49.2
    - address=/dev.shahed.biz/10.19.83.100
  additionalHostsEntries:
    - 192.168.49.109 harbor.k8s.ops
    - 192.168.49.110 pihole.k8s.ops
  customCnameEntries:
    - cname=k8s.ops,pihole.k8s.ops
    - cname=harbor.k8s.ops,pihole.k8s.ops
admin:
  enabled: true
  existingSecret: pihole
  passwordKey: password
persistentVolumeClaim:
  enabled: true
  size: 1Gi
  accessModes:
    - ReadWriteOnce
  storageClass: hostpath
  existingClaim: data-pihole-0
resources:
  requests:
    cpu: 100m
    memory: 256Mi
  limits:
    cpu: 500m
    memory: 512Mi
extraEnvVars:
  TZ: UTC-8
virtualHost: pihole.k8s.ops
ingress:
  enabled: true
  ingressClassName: nginx
  annotations:
    kubernetes.io/tls-acme: "false"
    kubernetes.io/ingress.class: nginx
  pathType: ImplementationSpecific
  path: /
  hosts:
    - pihole.k8s.ops
YML
| |
telnet 192.168.49.110 53
|
setsid open http://192.168.49.110 >/dev/null 2>&1 &
|
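Broader post-install check, assuming MetalLB has assigned 192.168.49.110 as requested in the values above:
kubectl -n pihole rollout status deploy/pihole
kubectl -n pihole get svc,deploy,pvc -o wide
dig @192.168.49.110 +short pihole.k8s.ops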
Helm » Ingress
|
Pi-Hole » Ingress | |
|---|---|
cat <<'YML' | \
kubectl -n pihole apply -f -
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: pihole-cert
  namespace: pihole
spec:
  secretName: pihole-cert
  commonName: pihole.shahed.biz.ops
  dnsNames:
    - pihole.shahed.biz.ops
    - pihole.shahed.biz
    - pihole.k8s.ops
  ipAddresses:
    - 192.168.49.110
    - 127.0.0.1
  duration: 8760h
  renewBefore: 720h
  privateKey:
    size: 256
    encoding: PKCS8
    algorithm: ECDSA
    rotationPolicy: Always
  usages:
    - digital signature
    - key encipherment
    - server auth
    - client auth
  subject:
    countries: ["BD"]
    provinces: ["Dhaka"]
    postalCodes: ["1500"]
    localities: ["Munshiganj"]
    organizations: ["Shahed, Inc."]
    organizationalUnits: ["pihole.shahed.biz.ops"]
    streetAddresses: ["256 Khal East, Passport Office"]
  issuerRef:
    name: shahed-ecc-sub-ca-2025-k8s
    kind: ClusterIssuer
YML
|
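Once cert-manager has processed the Certificate, its readiness and the issued subject/validity can be inspected (read-only check; names follow the manifest above):
kubectl -n pihole get certificate pihole-cert
kubectl -n pihole get secret pihole-cert -o jsonpath='{.data.tls\.crt}' | base64 -d | openssl x509 -noout -subject -issuer -dates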
Shahed_ECC_Root_CA_2025 » Firefox » Settings » Certificates » View Certificates » Import |
cat <<'YML' | \
kubectl -n pihole patch ingress/pihole --patch-file=/dev/stdin
---
metadata:
  annotations:
    cert-manager.io/cluster-issuer: shahed-ecc-sub-ca-2025-k8s
spec:
  tls:
    - hosts:
        - pihole.shahed.biz.ops
        - pihole.shahed.biz
        - pihole.k8s.ops
        - 192.168.49.110
        - 127.0.0.1
      secretName: pihole-cert
YML
|
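To confirm the patch was merged into the ingress, read back the TLS block (same yq style as used elsewhere on this page):
kubectl -n pihole get ingress pihole -o yaml | yq -P '.spec.tls'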
cat <<'YML' | \
kubectl -n pihole patch ingress/pihole --patch-file=/dev/stdin
---
metadata:
  annotations:
    cert-manager.io/cluster-issuer: null
spec:
  tls: null
YML
|
sudo systemctl restart systemd-resolved
resolvectl status
|
setsid open http://192.168.49.110 >/dev/null 2>&1 &
setsid open https://pihole.k8s.ops >/dev/null 2>&1 &
|
Helm » Config
|
Pi-Hole » Config |
Pi-Hole » Revert |
|---|---|
cat <<'YML'| \
kubectl -n pihole patch configmap \
pihole-custom-dnsmasq --type merge --patch-file=/dev/stdin
---
data:
  02-custom.conf: |
    addn-hosts=/etc/addn-hosts
    address=/bd.ops/192.168.49.2
    address=/io.ops/192.168.49.2
    address=/my.ops/192.168.49.2
    address=/biz.ops/192.168.49.2
    address=/com.ops/192.168.49.2
    address=/k8s.ops/192.168.49.2
    address=/org.ops/192.168.49.2
    address=/group.ops/192.168.49.2
    address=/dev.shahed.biz/10.19.83.100
    dhcp-option=6,192.168.49.110
  05-pihole-custom-cname.conf: |
    cname=bd.ops,pihole.k8s.ops
    cname=io.ops,pihole.k8s.ops
    cname=my.ops,pihole.k8s.ops
    cname=biz.ops,pihole.k8s.ops
    cname=com.ops,pihole.k8s.ops
    cname=k8s.ops,pihole.k8s.ops
    cname=org.ops,pihole.k8s.ops
    cname=group.ops,pihole.k8s.ops
    cname=harbor.k8s.ops,pihole.k8s.ops
  addn-hosts: |
    192.168.49.109 harbor.k8s.ops
    192.168.49.110 pihole.k8s.ops
YML
|
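Optional check after patching: read back the merged dnsmasq fragments, then resolve one of the new wildcard zones directly against Pi-Hole (the change only takes effect after the deployment bounce shown further below):
kubectl -n pihole get configmap pihole-custom-dnsmasq -o yaml | yq -P '.data'
dig @192.168.49.110 +short www.org.ops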
cat <<'YML'| \
kubectl -n pihole patch configmap \
pihole-custom-dnsmasq --type merge --patch-file=/dev/stdin
---
data:
  02-custom.conf: |
    addn-hosts=/etc/addn-hosts
    address=/biz.ops/192.168.49.2
    address=/k8s.ops/192.168.49.2
    address=/dev.shahed.biz/10.19.83.100
    dhcp-option=6,192.168.49.110
  05-pihole-custom-cname.conf: |
    cname=k8s.ops,pihole.k8s.ops
    cname=harbor.k8s.ops,pihole.k8s.ops
  addn-hosts: |
    192.168.49.109 harbor.k8s.ops
    192.168.49.110 pihole.k8s.ops
YML
|
cat <<'YML' | \
kubectl -n pihole patch deploy/pihole --patch-file=/dev/stdin
---
spec:
  replicas: 0
YML
|
cat <<'YML' | \
kubectl -n pihole patch deploy/pihole --patch-file=/dev/stdin
---
spec:
  replicas: 1
YML
|
sudo vim /etc/systemd/resolved.conf
: '
[Resolve]
DNS=192.168.49.110 192.168.49.2 10.19.83.100 1.1.1.1 8.8.8.8
FallbackDNS=1.1.1.1 8.8.8.8
DNSStubListener=yes
Domains=~.
'
sudo systemctl restart systemd-resolved
resolvectl status
|
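With systemd-resolved restarted, the per-link servers and an end-to-end lookup can be verified (standard resolvectl subcommands, added here as a sketch):
resolvectl dns
resolvectl query pihole.k8s.ops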
kubectl -n pihole run -i --tty --rm pihole-cli \
--image=alpine --restart=Never --command -- ash
apk --update add curl bind-tools inetutils-telnet
ping pihole.k8s.ops
dig +short pihole.k8s.ops
dig +short host.k8s.ops
nslookup www.k8s.ops
ping www.k8s.ops
|
docker run --rm alpine cat /etc/resolv.conf
docker run --rm alpine cat /etc/hosts
|
docker run --rm alpine ping www.k8s.ops
docker run --rm alpine ping google.com
|
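If the Docker daemon does not inherit the host resolv.conf as expected, the resolver can be pinned per container for a one-off test (illustrative variant of the checks above):
docker run --rm --dns 192.168.49.110 alpine nslookup www.k8s.ops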
Host » Resolv
|
Helm » Resolv | |
|---|---|
cat << EXE | sudo tee /etc/NetworkManager/dispatcher.d/minikube-ifupdown >/dev/null
#!/bin/bash
: '
@vendor Shahed Academia, Inc.
@web https://cdn.shahed.biz/docs/academia
@version 2025.10.10
@since 2025.10.10
'
USER_TARGET="minikube"
IFACE="\${1}"
ACTION="\${2}"
function silent_exit(){
exit 0
}
function verify_addr(){
printf -v DNS_HOST '%s' \$(sudo -u \${USER_TARGET} minikube ip)
if [[ "\${DNS_HOST}" =~ ^(([1-9]?[0-9]|1[0-9][0-9]|2([0-4][0-9]|5[0-5]))\.){3}([1-9]?[0-9]|1[0-9][0-9]|2([0-4][0-9]|5[0-5]))$ ]]; then
if [[ "\${DNS_HOST}" == '127.0.0.1' ]]; then silent_exit; fi
else silent_exit; fi
}
function verify_kube(){
printf -v K8S_JSON '%s' \$(sudo -u \${USER_TARGET} minikube status -o 'json')
printf -v K8S_REST '%s' \$(echo \${K8S_JSON} | jq -r '.APIServer')
printf -v K8S_KUBE '%s' \$(echo \${K8S_JSON} | jq -r '.Kubelet')
printf -v K8S_HOST '%s' \$(echo \${K8S_JSON} | jq -r '.Host')
if [[ "\${K8S_HOST}" == 'Running' ]]&&
[[ "\${K8S_KUBE}" == 'Running' ]]&&
[[ "\${K8S_REST}" == 'Running' ]]; then verify_addr
else silent_exit; fi
}
function verify_tool(){
if [[ -x "\$(command -v jq)" ]]; then verify_kube; else silent_exit; fi
}
function verify_mini(){
if [[ -x "\$(command -v minikube)" ]]; then verify_tool; else silent_exit; fi
}
function verify_link(){
HOST_ETHERS="\$(ip -j link show | jq -r '.[].ifname'| paste -sd' ' -)"
if [[ "\${HOST_ETHERS}" =~ "\${IFACE}" ]]; then verify_mini; else silent_exit; fi
}
function verify_mode(){
if [[ "\${ACTION}" == 'up' ]]; then verify_link; else verify_mini; fi
}
function verify(){
verify_mode
}
function amend_nameserver(){
RESOLV_CONF='/etc/resolv.conf'
if [[ -f \${RESOLV_CONF} ]]&&[[ "\$(grep -c \${DNS_HOST} \${RESOLV_CONF})" == 0 ]];then
cat << CON | sudo tee \${RESOLV_CONF} >/dev/null
# /etc/resolv.conf replaced by the minikube dispatcher
# /etc/NetworkManager/dispatcher.d/minikube-ifupdown
#
# Dynamic resolv.conf (using network dispatch)
# Primary DNS: Pi-Hole (192.168.49.110)
# Secondary DNS: Minikube (192.168.49.2 )
# Backup DNS: Office DNS (10.19.83.100 )
# Loopback DNS: systemd-resolved (127.0.0.53 )
# Public DNS fallback: Cloudflare + Google
nameserver 192.168.49.110
nameserver 192.168.49.2
nameserver 10.19.83.100
nameserver 127.0.0.53
nameserver 1.1.1.1
nameserver 8.8.8.8
search ops local .
options edns0 trust-ad
CON
else silent_exit; fi
}
function handle(){
amend_nameserver
}
function init(){
verify
handle
}
init
EXE
sudo chmod +x /etc/NetworkManager/dispatcher.d/minikube-ifupdown
| |
sudo systemctl restart systemd-resolved
resolvectl status
|
dig +short pihole.k8s.ops
dig +short host.k8s.ops
|
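Minimal confirmation that the dispatcher is in place and has rewritten the file (assumes the script above has fired at least once):
ls -l /etc/NetworkManager/dispatcher.d/minikube-ifupdown
grep -n nameserver /etc/resolv.conf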
Helm » Rollout
|
Pi-Hole » Rollout | |
|---|---|
kubectl -n pihole annotate deploy/pihole --overwrite \
kubernetes.io/change-cause="CKI-1| Initial Deployment"
|
kubectl -n pihole rollout history deploy/pihole
kubectl -n pihole rollout pause deploy/pihole
|
|
Pi-Hole » Rollout |
Pi-Hole » Revert |
cat <<'YML' | \
kubectl -n pihole patch deploy/pihole --patch-file=/dev/stdin
---
spec:
  template:
    spec:
      containers:
        - name: pihole
          resources:
            requests:
              cpu: 100m
              memory: 128Mi
            limits:
              cpu: 200m
              memory: 256Mi
YML
|
cat <<'YML' | \
kubectl -n pihole patch deploy/pihole --patch-file=/dev/stdin
---
spec:
  template:
    spec:
      containers:
        - name: pihole
          resources:
            requests:
              cpu: 100m
              memory: 256Mi
            limits:
              cpu: 500m
              memory: 512Mi
YML
|
kubectl -n pihole annotate deploy/pihole --overwrite \
kubernetes.io/change-cause="CKI-2| Resources Updated"
|
kubectl -n pihole rollout resume deploy/pihole
kubectl -n pihole rollout history deploy/pihole
|
kubectl -n pihole rollout undo deploy/pihole --to-revision=1
kubectl -n pihole rollout history deploy/pihole
|
kubectl -n pihole annotate deploy/pihole --overwrite \
kubernetes.io/change-cause="CKI-3| Revert Back to CKI-1"
|
kubectl -n pihole get deploy pihole -o yaml \
| yq -P '.spec.template.spec.containers[]|select(.name == "pihole")|.resources'
|
kubectl -n pihole get deploy pihole \
-o jsonpath='{.spec.template.spec.containers[?(@.name=="pihole")].resources}' | yq -P
|
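Individual revisions can be inspected in detail to compare what changed between the CKI change-cause annotations (revision numbers are illustrative):
kubectl -n pihole rollout history deploy/pihole --revision=1
kubectl -n pihole rollout history deploy/pihole --revision=2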
Helm » Uninstall
|
Helm » Uninstall |
|---|
kubectl delete ns pihole
helm -n pihole status pihole
helm -n pihole get all pihole
helm -n pihole uninstall pihole
kubectl -n pihole delete pvc --all
kubectl delete pv pihole-data-pihole-0
|
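Final sweep after uninstalling, using the names from the steps above; each command should come back empty or NotFound:
helm list -A | grep pihole || true
kubectl get pv | grep pihole || true
kubectl get ns pihole || true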
Playground
|
Playground | |
|---|---|
helm -n pihole install pihole mojo2600/pihole --version=2.33.0
helm -n pihole upgrade -i pihole mojo2600/pihole --version=2.34.0
helm show values mojo2600/pihole --version=2.34.0|less
| |
kubectl -n pihole get secret pihole -o json|jq -r '.data.password'|base64 -d;echo
kubectl -n pihole exec -it svc/pihole-web -c pihole -- ash
kubectl -n pihole logs -f svc/pihole-web -c pihole
kubectl -n pihole logs -f svc/pihole-web
| |
kubectl config --kubeconfig=${HOME}/.kube/aws-kubeconfig.yaml view --flatten
kubectl config --kubeconfig=${HOME}/.kube/dev-kubeconfig.yaml view --flatten
kubectl config --kubeconfig=${HOME}/.kube/gcp-kubeconfig.yaml view --flatten
kubectl config --kubeconfig=${HOME}/.kube/config view --flatten
| |
kubectl -n pihole delete all --all
kubectl -n pihole delete ing --all
kubectl -n pihole delete sts --all
|
kubectl delete pv pihole-data-pihole-0
kubectl -n pihole delete svc --all
kubectl -n pihole delete pvc --all
|
kubectl -n pihole rollout history deploy pihole
kubectl -n pihole rollout restart deploy pihole
kubectl -n pihole rollout status deploy pihole
|
kubectl -n pihole exec -it svc/pihole-web -c pihole -- ash
kubectl -n pihole logs -f svc/pihole-web -c pihole
kubectl -n pihole logs -f svc/pihole-web
|