-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathexam-3
121 lines (107 loc) · 3.7 KB
/
exam-3
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
Q01:
kubectl create sa pvviewer
kubectl create clusterrole pvviewer-role --resource=persistentvolumes --verb=list
kubectl create clusterrolebinding pvviewer-role-binding --clusterrole=pvviewer-role --serviceaccount=default:pvviewer
kubectl run --generator=run-pod/v1 pvviewer --image=redis --dry-run -o yaml > pvviewer.yaml
#Add serviceAccountName: pvviewer
apiVersion: v1
kind: Pod
metadata:
name: pvviewer
spec:
containers:
- image: redis
imagePullPolicy: IfNotPresent
name: pvviewer
serviceAccountName: pvviewer
-------------------------------------------------------------------------------------------------------------------------
Q02:
kubectl get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}' > /root/node_ips OR
kubectl get nodes -o jsonpath='{.items[*].status.addresses[].address}' > /root/node_ips
Refer: https://kubernetes.io/docs/reference/kubectl/cheatsheet/
-------------------------------------------------------------------------------------------------------------------------
Q03:
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
name: multi-pod
spec:
containers:
- image: nginx
name: alpha
env:
- name: name
value: alpha
- image: busybox
name: beta
command: ["sleep", "4800"]
env:
- name: name
value: beta
-------------------------------------------------------------------------------------------------------------------------
Q04
apiVersion: v1
kind: Pod
metadata:
name: lion
spec:
containers:
- image: redis:alpine
name: lion
resources:
limits:
cpu: "2"
memory: "500Mi"
-------------------------------------------------------------------------------------------------------------------------
Q05
kubectl run --generator=run-pod/v1 test-nc --image=busybox:1.28 --rm -it -- nc -z -v -w2 np-test-service 80
#The above command should time out
#Now Create a network Policy allowing connections from All to port 80 to pod np-test-1 (use label: run=np-test-1)
apiVersion: networking.k8s.io/v1 # extensions/v1beta1 was removed in k8s v1.16; use the stable networking.k8s.io/v1 group
kind: NetworkPolicy
metadata:
generation: 1
name: ingress-to-nptest
spec:
podSelector:
matchLabels:
run: np-test-1
policyTypes:
- Ingress
ingress:
- ports:
- port: 80
protocol: TCP
#Check connectivity Again:
master $ kubectl run --generator=run-pod/v1 test-nc --image=busybox:1.28 --rm -it -- sh
If you don't see a command prompt, try pressing enter.
/ # nc -z -v -w 2 np-test-service 80
np-test-service (10.110.179.146:80) open
/ # ^C
/ # Session ended, resume using 'kubectl attach test-nc -c test-nc -i -t' command when the pod is running
pod "test-nc" deleted
master $
-------------------------------------------------------------------------------------------------------------------------
Q06
kubectl taint node node01 env_type=production:NoSchedule
master $ kubectl describe node node01 | grep -i taint
Taints: env_type=production:NoSchedule
master $
kubectl run --generator=run-pod/v1 dev-redis --image=redis:alpine
#Check that the pod is not scheduled on node01
#Create prod-redis pod with toleration
apiVersion: v1
kind: Pod
metadata:
name: prod-redis
namespace: default
spec:
containers:
- image: redis:alpine
name: prod-redis
tolerations:
- effect: NoSchedule
key: env_type
value: production
operator: Equal
kubectl create -f prod-redis.yaml
#Check that the pod is running. Pod can be scheduled on node01 but it is not necessary that the scheduler will exclusively place it
on node01. Since there is no node affinity specified, the scheduler can place it on any node.
-------------------------------------------------------------------------------------------------------------------------