From 668d313ccf630ebb80170664cd8aaa2c73ebc076 Mon Sep 17 00:00:00 2001 From: Drew Bednar Date: Mon, 28 Jun 2021 19:25:07 -0400 Subject: [PATCH] Commiting ch7 work --- README.md | 121 +++++++++++++++++++++++++++++++++- manifests/# Hexapod | 26 ++++++++ manifests/kuard-pod-full.yaml | 55 ++++++++++++++++ manifests/kuard-pod-vol.yaml | 27 ++++++++ objects.md | 19 ++++++ 5 files changed, 247 insertions(+), 1 deletion(-) create mode 100644 manifests/# Hexapod create mode 100644 manifests/kuard-pod-full.yaml create mode 100644 manifests/kuard-pod-vol.yaml create mode 100644 objects.md diff --git a/README.md b/README.md index 2fbefeb..45f2a3e 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,6 @@ To make it easier on you copy your kube config file to the root of this project and name it kubeconfig.conf. Then just remember to run source `scripts/profile` to start using the kubectl command. I am just setting the KUBECONFIG envar to the kubeconfig.conf file for the bash session. - ## Accessing a pod from your laptop Portforwarding @@ -26,6 +25,7 @@ kubectl logs -f ``` To view logs from the previous pod. Useful if the pod instances keep restarting. + ``` kubectl logs --previous ``` @@ -49,6 +49,7 @@ kubectl exec -it ## Copying files to and from a running container +This is gnerally an anti-pattern. You should be treating the contents of a container as immutable. ``` kubectl cp : @@ -73,3 +74,121 @@ If we wanted to limit cpu you could use ``` docker run -d --name kuard -p 8080:8080 --memory 200m --memory-swap 1G --cpu-shares 1024 docker1.runcible.io:5151/kuard:latest ``` + +## Exposing a service + +### Legacy (1.17?) way + +THIS APPARENTLY IS THE LEGACY WAY OF DOING IT. 
+
+Start by creating a deployment
+
+```
+kubectl run alpaca-prod \
+--image=gcr.io/kuar-demo/kuard-amd64:blue \
+--replicas=3 \
+--port=8080 \
+--labels="ver=1,app=alpaca,env=prod"
+```
+
+Then expose the deployment with a Service
+
+```
+kubectl expose deployment alpaca-prod
+```
+
+Then check on your service
+
+```
+kubectl get services -o wide
+```
+
+Consider adding a readiness check to the deployment. This will be used by the service to only forward traffic
+to ready services. You can watch the endpoints used by the service (and watch containers removed from a service)
+with:
+
+```
+kubectl get endpoints alpaca-prod --watch
+```
+
+#### The new way
+
+Note: kubectl create deployment doesn't support the `--labels=` keyword, for some reason.
+
+Create the deployment
+
+```
+kubectl create deployment alpaca-prod \
+--image=gcr.io/kuar-demo/kuard-amd64:blue \
+--replicas=3 \
+--port=8080
+```
+
+Label it and the pods
+
+```
+kubectl label deployment alpaca-prod env=prod ver=1
+```
+
+```
+kubectl label pod --selector=app=alpaca-prod env=prod ver=1
+```
+
+Expose the service while also defining the selector
+
+```
+kubectl expose deployment alpaca-prod --type=NodePort --selector="app=alpaca-prod,ver=1,env=prod"
+```
+
+Then check on your service
+
+```
+kubectl get services -o wide
+```
+
+Consider adding a readiness check to the deployment. This will be used by the service to only forward traffic
+to ready services. You can watch the endpoints used by the service (and watch containers removed from a service)
+with:
+
+```
+kubectl get endpoints alpaca-prod --watch
+```
+
+### Accessing the exposed service
+
+A cheap way in dev is just to use port forwarding
+
+```
+ALPACA_PROD=$(kubectl get pods -l app=alpaca -o jsonpath='{.items[0].metadata.name}')
+kubectl port-forward $ALPACA_PROD 48858:8080
+```
+
+Another potentially production-capable alternative is to use a NodePort type. This will open a port on all workers
+that will forward traffic to the service.
+
+Option 1: Expose as NodePort
+
+```
+kubectl expose deployment --type=NodePort alpaca-prod
+```
+
+Option 2: Modify Service switching to NodePort
+
+```
+kubectl edit service alpaca-prod
+```
+
+change the `spec.type` field to NodePort and save.
+
+check the port it is being served under:
+
+```
+kubectl describe service alpaca-prod
+```
+
+## LoadBalancer Services
+
+If the cloud environment supports it you should be able to edit the `spec.type` to use `LoadBalancer`.
+This builds on top of `NodePort`; your cloud provider creates a new load balancer and directs it at
+nodes in your cluster. This should eventually assign an EXTERNAL-IP with a public IP (or hostname)
+assigned by the cloud vendor.
diff --git a/manifests/# Hexapod b/manifests/# Hexapod
new file mode 100644
index 0000000..e99fd29
--- /dev/null
+++ b/manifests/# Hexapod
@@ -0,0 +1,26 @@
+# Hexapod
+
+
+## Resources or Inspiration
+
+The Phoenix code stuff is kind of old, but look up this guy — his repos support a quad mode also:
+https://github.com/KurtE/Arduino_Phoenix_Parts
+See also https://www.robotshop.com/community/robots/show/interbotix-phantomx-hexapod
+
+[Hiwonder SpiderPi](https://www.hiwonder.hk/products/robosoul-spiderpi-ai-intelligent-visual-hexapod-robot-powered-by-raspberry-pi). Uses Python, but I can't find the code.
+
+
+### Markwtech
+
+A very nice project. Available on Thingiverse and:
+
+https://markwtech.com/robots/hexapod/
+https://www.thingiverse.com/thing:3463845?collect
+
+
+
+## Gaits
+tripod, ripple, wave walking gaits
+adaptive gait to walk on uneven terrain. The Interbotix guys did this with the Robotis API.
+Translate and rotate in place + diff --git a/manifests/kuard-pod-full.yaml b/manifests/kuard-pod-full.yaml new file mode 100644 index 0000000..7585a00 --- /dev/null +++ b/manifests/kuard-pod-full.yaml @@ -0,0 +1,55 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kuard + labels: + app: kuard + version: dirp +spec: + volumes: + - name: "kuard-data" + hostPath: + path: "/var/lib/kuard" + # - name: "kuard-data" + # nfs: + # server: "my.nfs.server.local" + # path: "/exports" + containers: + - image: gcr.io/kuar-demo/kuard-amd64:blue + + name: kuard + + volumeMounts: + - mountPath: "/data" + name: "kuard-data" + + livenessProbe: + httpGet: + path: /healthy + port: 8080 + initialDelaySeconds: 5 + timeoutSeconds: 1 + periodSeconds: 10 + failureThreshold: 3 + + readinessProbe: + httpGet: + path: "/ready" + port: 8080 + initialDelaySeconds: 30 + timeoutSeconds: 1 + periodSeconds: 10 + failureThreshold: 3 + + ports: + - containerPort: 8080 + name: http + protocol: TCP + + resources: + requests: + cpu: "500m" + memory: "128Mi" + limits: + cpu: "1000m" + memory: "256Mi" diff --git a/manifests/kuard-pod-vol.yaml b/manifests/kuard-pod-vol.yaml new file mode 100644 index 0000000..1a9c2ca --- /dev/null +++ b/manifests/kuard-pod-vol.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kuard +spec: + volumes: + - name: "kuard-data" + hostPath: + path: "/var/lib/kuard" + containers: + - image: gcr.io/kuar-demo/kuard-amd64:blue + name: kuard + volumeMounts: + - mountPath: "/data" + name: "kuard-data" + livenessProbe: + httpGet: + path: /healthy + port: 8080 + initialDelaySeconds: 5 + timeoutSeconds: 1 + periodSeconds: 10 + failureThreshold: 3 + ports: + - containerPort: 8080 + name: http + protocol: TCP diff --git a/objects.md b/objects.md new file mode 100644 index 0000000..f3fa7b8 --- /dev/null +++ b/objects.md @@ -0,0 +1,19 @@ +## + +## Deployments + +## Services + +## Node Ports + +## End Points + +The "buddy" of a service. 
Contains the IP addresses for that service
+
+```
+kubectl describe endpoints
+```
+
+## Ingress
+
+Multiple ingress objects are merged together into a single config for K8s's internal HTTP load-balancing system.