Kubernetes

Quick cluster setup script

Set the hostname

sh
hostnamectl set-hostname master

hostname

# check the OS version; it should be 7.5 or later
cat /etc/redhat-release

Environment setup

Hostname resolution

sh
vim /etc/hosts

192.168.109.100 master
192.168.109.101 node1
192.168.109.102 node2
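
A quick sanity check that the new entries resolve (an optional addition, assuming the hosts file above is in place):

sh
ping -c 1 node1
ping -c 1 node2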

Time synchronization

sh
systemctl status chronyd
systemctl start chronyd
systemctl enable chronyd

date

Disable the iptables and firewalld services

sh
systemctl stop firewalld
systemctl disable firewalld

systemctl status iptables
systemctl stop iptables
systemctl disable iptables

Disable SELinux

sh
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

# reboot for the change to take effect, then verify
getenforce

Disable swap

sh
sed -i 's|/dev/mapper/centos-swap|# /dev/mapper/centos-swap|' /etc/fstab

# reboot for the change to take effect
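
To turn swap off immediately without waiting for the reboot, and to confirm it is off, a small optional addition:

sh
swapoff -a

# the Swap line should show 0
free -h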

Adjust the Linux kernel parameters

sh
# edit /etc/sysctl.d/kubernetes.conf and add the following
# vim /etc/sysctl.d/kubernetes.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1

# load the bridge netfilter module first, otherwise the bridge settings cannot be applied
modprobe br_netfilter

# verify the module is loaded
lsmod | grep br_netfilter

# reload the configuration
sysctl -p /etc/sysctl.d/kubernetes.conf

Configure IPVS support

sh
# install ipset and ipvsadm
yum install -y ipset ipvsadm

# write the kernel modules that need to be loaded into a script file
cat <<EOF > /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

# make the script executable
chmod +x /etc/sysconfig/modules/ipvs.modules

# run the script
/bin/bash /etc/sysconfig/modules/ipvs.modules

# verify the modules are loaded
lsmod | grep -e ip_vs -e nf_conntrack_ipv4

nf_conntrack_ipv4      19149  0
nf_defrag_ipv4         12729  1 nf_conntrack_ipv4
ip_vs_sh               12688  0
ip_vs_wrr              12697  0
ip_vs_rr               12600  0
ip_vs                 145458  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          143411  2 ip_vs,nf_conntrack_ipv4
libcrc32c              12644  3 xfs,ip_vs,nf_conntrack

Reboot

sh
reboot

Remove any old Docker installation

sh
systemctl stop docker

# if the install later fails with a conflict like the one below, remove the conflicting package first
# Transaction check error:
#  file /usr/bin/docker from install of docker-ce-18.06.3.ce-3.el7.x86_64 conflicts with file from package docker-ce-cli-1:26.1.4-1.el7.x86_64
#  file /usr/share/bash-completion/completions/docker from install of docker-ce-18.06.3.ce-3.el7.x86_64 conflicts with file from package docker-ce-cli-1:26.1.4-1.el7.x86_64

yum remove -y docker-ce-cli-1:26.1.4-1.el7.x86_64

# or remove all old Docker packages

yum remove -y docker \
    docker-client \
    docker-client-latest \
    docker-common \
    docker-latest \
    docker-latest-logrotate \
    docker-logrotate \
    docker-selinux \
    docker-engine-selinux \
    docker-engine \
    docker-ce

Install Docker

sh
# switch to the Aliyun mirror repo; either of the two commands below works
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo

yum makecache fast

# list the docker versions available from this repo
yum list docker-ce --showduplicates

# install a specific docker-ce version
# --setopt=obsoletes=0 is required, otherwise the latest version is installed
yum install -y --setopt=obsoletes=0 docker-ce-18.06.3.ce-3.el7

# add the daemon configuration
# docker defaults to the cgroupfs cgroup driver, while kubernetes recommends systemd
mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": [
      "https://swr.cn-north-4.myhuaweicloud.com",
      "https://ccr.ccs.tencentyun.com"
  ]
}
EOF

# start
systemctl start docker
docker version
systemctl enable docker
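
As a quick check that daemon.json took effect, the reported cgroup driver can be inspected (an optional verification, not part of the original steps):

sh
docker info | grep -i "cgroup driver"
# expected output: Cgroup Driver: systemd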

Add the Aliyun Kubernetes yum repo

sh
cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Install the Kubernetes core components

sh
# install kubeadm kubelet kubectl
yum install -y --setopt=obsoletes=0 kubeadm-1.17.4-0 kubelet-1.17.4-0 kubectl-1.17.4-0

# configure the kubelet cgroup driver: edit /etc/sysconfig/kubelet and add the following

sudo tee /etc/sysconfig/kubelet <<-'EOF'
KUBELET_CGROUP_ARGS="--cgroup-driver=systemd"
KUBE_PROXY_MODE="ipvs"
EOF

# enable kubelet so it starts on boot
systemctl enable kubelet

Prepare the cluster images

sh
# before setting up the cluster, the required images must be prepared in advance; list them with the command below
kubeadm config images list

I0322 17:37:13.996185   12414 version.go:251] remote version is much newer: v1.32.3; falling back to: stable-1.17
W0322 17:37:17.635642   12414 validation.go:28] Cannot validate kubelet config - no validator is available
W0322 17:37:17.635665   12414 validation.go:28] Cannot validate kube-proxy config - no validator is available
k8s.gcr.io/kube-apiserver:v1.17.17
k8s.gcr.io/kube-controller-manager:v1.17.17
k8s.gcr.io/kube-scheduler:v1.17.17
k8s.gcr.io/kube-proxy:v1.17.17
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.4.3-0
k8s.gcr.io/coredns:1.6.5

# these images live on k8s.gcr.io, which is often unreachable; pull them from the Aliyun mirror and re-tag them instead
images=(
  kube-apiserver:v1.17.4
  kube-controller-manager:v1.17.4
  kube-scheduler:v1.17.4
  kube-proxy:v1.17.4
  pause:3.1
  etcd:3.4.3-0
  coredns:1.6.5
)


for imageName in ${images[@]} ; do
	docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
	docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName k8s.gcr.io/$imageName
	docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName 
done
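
Optionally, confirm that all images are now tagged for k8s.gcr.io before continuing:

sh
docker images | grep k8s.gcr.io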

Take a VM snapshot

Clone the current VM; later worker nodes can be initialized from this snapshot.

Initialize the master node

The following initialization is performed only on the master node; worker nodes do not need it.

sh
# create the cluster
kubeadm init --kubernetes-version=v1.17.4 \
    --pod-network-cidr=10.244.0.0/16 \
    --service-cidr=10.96.0.0/12 \
    --apiserver-advertise-address=192.168.109.100

# create the kubeconfig for the current user
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# ----------------------------------------------------------
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.109.100:6443 --token hsr22s.zvy38bd7tiwfkji9 \
    --discovery-token-ca-cert-hash sha256:7097f58b183a3c31f9b7e170f3b9a266955d2705fc2945e81ffa1f5c55ec8957
    
# -----------------------------------------------------------

# list the nodes
kubectl get nodes

# reset the node if the initialization needs to be redone
kubeadm reset
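
The join token printed by kubeadm init expires after 24 hours by default; if a worker node joins later, a fresh join command can be generated on the master:

sh
kubeadm token create --print-join-command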

Install a network plugin

Kubernetes supports several network plugins, such as flannel, calico and canal. These plugins run as DaemonSet controllers; flannel is used here.

sh
# get the flannel config file
wget https://ghfast.top/https://github.com/flannel-io/flannel/blob/221a83cab893a4a724aaff0bb53fbfd14a7724e4/Documentation/kube-flannel.yml

# replace the quay.io registry in the file with quay-mirror.qiniu.com

# deploy flannel from the config file
kubectl apply -f kube-flannel.yml

# wait a moment, then check the node status again
kubectl get nodes
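
Nodes only become Ready once the flannel pods are Running; their status can be watched across namespaces (an optional check):

sh
kubectl get pods --all-namespaces | grep flannel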

Deploy a service

Deploy an Nginx application in the cluster as a test.

sh
# deploy nginx
kubectl create deployment nginx --image=nginx:1.14-alpine

# expose port
kubectl expose deployment nginx --port=80 --type=NodePort

# check pod and service status
kubectl get pod,svc

curl http://192.168.109.101:31892

Resource management

The core of working with Kubernetes is learning how to manage the various resources in the cluster: Pods, Pod controllers, Services, storage, and so on.

Resource management approaches

  • Imperative command management: operates on live objects; suited for testing; pros: simple; cons: only manipulates live objects, no audit trail; example: kubectl run nginx-pod --image=nginx:1.17.1 --port=80
  • Imperative object configuration: operates on files; suited for development; pros: changes can be audited and tracked; cons: large projects mean many config files and cumbersome operations; example: kubectl create/patch -f nginx-pod.yaml
  • Declarative object configuration: operates on directories; suited for development; pros: supports directory-level operations; cons: hard to debug when something unexpected happens; example: kubectl apply -f nginx-pod.yaml

Imperative command management

kubectl is the command-line tool for Kubernetes clusters. It can manage the cluster itself and install and deploy containerized applications on it. The syntax is:

sh
kubectl [command] [type] [name] [flags]

command: the operation to perform on the resource, such as create, get or delete

type: the resource type, such as deployment, pod or service

name: the name of the resource; names are case-sensitive

flags: additional optional flags

sh
# show all pods
kubectl get pod

# show one pod
kubectl get pod pod_name

# show pod details
kubectl get pod pod_name -o wide/json/yaml
Operations
sh
kubectl --help

kubectl controls the Kubernetes cluster manager.

 Find more information at: https://kubernetes.io/docs/reference/kubectl/overview/

Basic Commands (Beginner):
  create         Create a resource from a file or from stdin.
  expose         Take a replication controller, service, deployment or pod and expose it as a new Kubernetes Service
  run            Run a particular image on the cluster
  set            Set specific features on objects

Basic Commands (Intermediate):
  explain        Documentation of resources
  get            Display one or many resources
  edit           Edit a resource on the server
  delete         Delete resources by filenames, stdin, resources and names, or by resources and label selector

Deploy Commands:
  rollout        Manage the rollout of a resource
  scale          Set a new size for a Deployment, ReplicaSet or Replication Controller
  autoscale      Auto-scale a Deployment, ReplicaSet, or ReplicationController

Cluster Management Commands:
  certificate    Modify certificate resources.
  cluster-info   Display cluster info
  top            Display Resource (CPU/Memory/Storage) usage.
  cordon         Mark node as unschedulable
  uncordon       Mark node as schedulable
  drain          Drain node in preparation for maintenance
  taint          Update the taints on one or more nodes

Troubleshooting and Debugging Commands:
  describe       Show details of a specific resource or group of resources
  logs           Print the logs for a container in a pod
  attach         Attach to a running container
  exec           Execute a command in a container
  port-forward   Forward one or more local ports to a pod
  proxy          Run a proxy to the Kubernetes API server
  cp             Copy files and directories to and from containers.
  auth           Inspect authorization

Advanced Commands:
  diff           Diff live version against would-be applied version
  apply          Apply a configuration to a resource by filename or stdin
  patch          Update field(s) of a resource using strategic merge patch
  replace        Replace a resource by filename or stdin
  wait           Experimental: Wait for a specific condition on one or many resources.
  convert        Convert config files between different API versions
  kustomize      Build a kustomization target from a directory or a remote url.

Settings Commands:
  label          Update the labels on a resource
  annotate       Update the annotations on a resource
  completion     Output shell completion code for the specified shell (bash or zsh)

Other Commands:
  api-resources  Print the supported API resources on the server
  api-versions   Print the supported API versions on the server, in the form of "group/version"
  config         Modify kubeconfig files
  plugin         Provides utilities for interacting with plugins.
  version        Print the client and server version information

Usage:
  kubectl [flags] [options]

Use "kubectl <command> --help" for more information about a given command.
Use "kubectl options" for a list of global command-line options (applies to all commands).
Resource types
sh
kubectl api-resources

NAME                              SHORTNAMES   APIGROUP                       NAMESPACED   KIND
bindings                                                                      true         Binding
componentstatuses                 cs                                          false        ComponentStatus
configmaps                        cm                                          true         ConfigMap
endpoints                         ep                                          true         Endpoints
events                            ev                                          true         Event
limitranges                       limits                                      true         LimitRange
namespaces                        ns                                          false        Namespace
nodes                             no                                          false        Node
persistentvolumeclaims            pvc                                         true         PersistentVolumeClaim
persistentvolumes                 pv                                          false        PersistentVolume
pods                              po                                          true         Pod
podtemplates                                                                  true         PodTemplate
replicationcontrollers            rc                                          true         ReplicationController
resourcequotas                    quota                                       true         ResourceQuota
secrets                                                                       true         Secret
serviceaccounts                   sa                                          true         ServiceAccount
services                          svc                                         true         Service
mutatingwebhookconfigurations                  admissionregistration.k8s.io   false        MutatingWebhookConfiguration
validatingwebhookconfigurations                admissionregistration.k8s.io   false        ValidatingWebhookConfiguration
customresourcedefinitions         crd,crds     apiextensions.k8s.io           false        CustomResourceDefinition
apiservices                                    apiregistration.k8s.io         false        APIService
controllerrevisions                            apps                           true         ControllerRevision
daemonsets                        ds           apps                           true         DaemonSet
deployments                       deploy       apps                           true         Deployment
replicasets                       rs           apps                           true         ReplicaSet
statefulsets                      sts          apps                           true         StatefulSet
tokenreviews                                   authentication.k8s.io          false        TokenReview
localsubjectaccessreviews                      authorization.k8s.io           true         LocalSubjectAccessReview
selfsubjectaccessreviews                       authorization.k8s.io           false        SelfSubjectAccessReview
selfsubjectrulesreviews                        authorization.k8s.io           false        SelfSubjectRulesReview
subjectaccessreviews                           authorization.k8s.io           false        SubjectAccessReview
horizontalpodautoscalers          hpa          autoscaling                    true         HorizontalPodAutoscaler
cronjobs                          cj           batch                          true         CronJob
jobs                                           batch                          true         Job
certificatesigningrequests        csr          certificates.k8s.io            false        CertificateSigningRequest
leases                                         coordination.k8s.io            true         Lease
endpointslices                                 discovery.k8s.io               true         EndpointSlice
events                            ev           events.k8s.io                  true         Event
ingresses                         ing          extensions                     true         Ingress
ingresses                         ing          networking.k8s.io              true         Ingress
networkpolicies                   netpol       networking.k8s.io              true         NetworkPolicy
runtimeclasses                                 node.k8s.io                    false        RuntimeClass
poddisruptionbudgets              pdb          policy                         true         PodDisruptionBudget
podsecuritypolicies               psp          policy                         false        PodSecurityPolicy
clusterrolebindings                            rbac.authorization.k8s.io      false        ClusterRoleBinding
clusterroles                                   rbac.authorization.k8s.io      false        ClusterRole
rolebindings                                   rbac.authorization.k8s.io      true         RoleBinding
roles                                          rbac.authorization.k8s.io      true         Role
priorityclasses                   pc           scheduling.k8s.io              false        PriorityClass
csidrivers                                     storage.k8s.io                 false        CSIDriver
csinodes                                       storage.k8s.io                 false        CSINode
storageclasses                    sc           storage.k8s.io                 false        StorageClass
volumeattachments

Creating and deleting a namespace

sh
# create
kubectl create namespace dev

# check
kubectl get ns

# run an nginx pod in the dev namespace
kubectl run pod --image=nginx -n dev

# check
kubectl get pods -n dev

kubectl describe pods pod-864f9875b9-7t9xw -n dev

# delete namespace dev
kubectl delete ns dev

# check
kubectl get namespace

Imperative object configuration

Create a file named nginxpod.yaml

yaml
apiVersion: v1
kind: Namespace
metadata:
    name: dev

---

apiVersion: v1
kind: Pod
metadata:
    name: nginxpod
    labels:
        app: dev
spec:
    containers:
        - name: nginx-containers
          image: nginx:1.17.1

Create the resource

sh
kubectl create -f nginxpod.yaml

Declarative object configuration

sh
kubectl apply -f nginxpod.yaml

kubectl describe pod nginxpod

Hands-on practice

Namespace

sh
kubectl get namespace

NAME              STATUS   AGE
default           Active   5h46m # default namespace
dev               Active   55m   # newly created
kube-flannel      Active   4h53m # flannel network plugin
kube-node-lease   Active   5h46m # heartbeat leases between cluster nodes
kube-public       Active   5h46m # readable by everyone, including unauthenticated users
kube-system       Active   5h46m # resources created by the kubernetes system itself

# get the default namespace
kubectl get ns default

# list pods in the kube-system namespace
kubectl get pod -n kube-system

NAME                             READY   STATUS    RESTARTS   AGE
coredns-6955765f44-lkqk8         1/1     Running   0          5h50m
coredns-6955765f44-rh5zz         1/1     Running   0          5h50m
etcd-master                      1/1     Running   0          5h50m
kube-apiserver-master            1/1     Running   0          5h50m
kube-controller-manager-master   1/1     Running   0          5h50m
kube-flannel-ds-amd64-8n54r      1/1     Running   0          4h32m
kube-flannel-ds-amd64-hrrpv      1/1     Running   0          4h32m
kube-flannel-ds-amd64-hwwnn      1/1     Running   0          4h32m
kube-proxy-8jkw4                 1/1     Running   0          5h50m
kube-proxy-jdvdl                 1/1     Running   0          5h35m
kube-proxy-pgnzk                 1/1     Running   0          5h35m
kube-scheduler-master            1/1     Running   0          5h50m

# show pod details
kubectl get pod -n kube-system -o wide

# delete
kubectl delete ns dev

Pod

After the cluster starts, each Kubernetes component itself runs as a pod.

sh
kubectl get pod -n kube-system

Create and run

Kubernetes runs pods through pod controllers.

sh
# syntax: kubectl run <pod controller name> [flags]
kubectl create ns dev

kubectl run nginx --image=nginx:1.17.1 --port=80 --namespace=dev

Configuration-based operations

Create a file named pod-nginx.yaml

yaml
apiVersion: v1
kind: Namespace
metadata:
    name: dev

---
apiVersion: v1
kind: Pod
metadata:
    name: nginxpod
    namespace: dev
spec:
    containers:
        - image: nginx:1.17.1
          name: pod
          ports:
              - name: nginx-port
                containerPort: 80
                protocol: TCP

Operations

sh
# create
kubectl create -f pod-nginx.yaml 

# delete
kubectl delete -f pod-nginx.yaml

Label

Label is an important concept in Kubernetes. Its purpose is to attach identifying metadata to resources so that they can be distinguished and selected. Characteristics of labels:

  • A Label is attached as a key/value pair to objects such as Node, Pod and Service
  • A resource object can define any number of Labels, and the same Label can be attached to any number of resource objects
  • Labels are usually defined when the resource object is created, but they can also be added or removed dynamically afterwards

Labels allow resources to be grouped along multiple dimensions, so that resource allocation, scheduling, configuration and deployment can be managed flexibly and conveniently.

Some common Label examples:

  • Version labels: "version":"release", "version":"stable"
  • Environment labels: "environment":"dev", "environment":"test", "environment":"pro"
  • Architecture labels: "tier":"frontend", "tier":"backend"

Once labels are defined, there also has to be a way to select by them; this is what the Label Selector is for:

  • A Label attaches an identifier to a resource object
  • A Label Selector queries and filters resource objects that carry certain labels

There are currently two kinds of Label Selector:

  1. Equality-based Label Selectors
    • name = slave: selects all objects whose labels contain key "name" with value "slave"
    • env != production: selects all objects whose labels contain key "env" with a value other than "production"
  2. Set-based Label Selectors
    • name in (master, slave): selects all objects whose labels contain key "name" with value "master" or "slave"
    • name not in (frontend): selects all objects whose labels contain key "name" with a value other than "frontend"

Multiple selection conditions can be combined by joining several Label Selectors with commas, as in the sketch below.
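
A minimal sketch of how these selectors are passed to kubectl via the -l flag, assuming pods in the dev namespace that carry version and env labels as used later in this section:

sh
# equality-based selector
kubectl get pod -n dev -l "version=2.0" --show-labels

# set-based selector
kubectl get pod -n dev -l "version in (1.0,2.0)" --show-labels

# multiple conditions, comma separated
kubectl get pod -n dev -l "version in (1.0,2.0),env=test" --show-labels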

Imperative labeling

sh
# label
kubectl label pod nginx -n dev version=1.0  

# check label
kubectl get pod -n dev -o wide --show-labels

# update
kubectl label pod nginx -n dev version=2.0 --overwrite

# filter
kubectl get pod -n dev -o wide -l "version=2.0" --show-labels

kubectl get pod -n dev -o wide -l "version!=2.0" --show-labels

# delete label
kubectl label pod nginx -n dev tier-

Labeling via configuration

yaml
apiVersion: v1
kind: Pod
metadata:
    name: nginx
    namespace: dev
    labels:
        version: '3.0'
        env: 'test'
spec:
    containers:
        - image: nginx:1.17.1
          name: pod
          ports:
              - name: nginx-port
                containerPort: 80
                protocol: TCP

Apply

sh
kubectl apply -f pod-nginx.yaml

Deployment

Imperative operations

sh
# syntax: kubectl run <deployment name> [flags]
kubectl run nginx --image=nginx:1.17.1 --port=80 --replicas=3 -n dev
# check
kubectl get deployment,pod -n dev -o wide

NAME                    READY   UP-TO-DATE   AVAILABLE   AGE    CONTAINERS   IMAGES         SELECTOR
deployment.apps/nginx   3/3     3            3           2m3s   nginx        nginx:1.17.1   run=nginx

NAME                         READY   STATUS    RESTARTS   AGE    IP            NODE    NOMINATED NODE   READINESS GATES
pod/nginx-64777cd554-mhc5k   1/1     Running   0          2m3s   10.244.1.8    node2   <none>           <none>
pod/nginx-64777cd554-sbv7g   1/1     Running   0          2m3s   10.244.2.24   node1   <none>           <none>

# describe 
kubectl describe deployment nginx -n dev

kubectl get pod -n dev -o wide --show-labels

# delete
kubectl delete deploy nginx -n dev
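
Note that newer kubectl releases changed kubectl run to create a bare Pod and dropped --replicas; there, a rough equivalent of the command above is to create the deployment and scale it:

sh
kubectl create deployment nginx --image=nginx:1.17.1 -n dev
kubectl scale deployment nginx --replicas=3 -n dev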

Configuration-based operations

Create deploy-nginx.yaml

yaml
apiVersion: apps/v1
kind: Deployment
metadata:
    name: nginx
    namespace: dev
    labels:
        version: '3.0'
        env: 'test'
spec:
    replicas: 3
    selector:
        matchLabels:
            run: nginx
    template:
        metadata:
            labels:
                run: nginx
        spec:
            containers:
                - image: nginx:1.17.1
                  name: nginx
                  ports:
                      - containerPort: 80
                        protocol: TCP

Operations

sh
# create
kubectl create -f deploy-nginx.yaml

# check 
kubectl get deployment,pod -n dev -o wide

# delete
kubectl delete -f deploy-nginx.yaml

Services

A Service can be seen as the unified access point for a group of Pods of the same kind; with a Service, applications get service discovery and load balancing with little effort.

Access from inside the cluster

sh
# create service
kubectl expose deployment nginx --name=svc-nginx1 --type=ClusterIP --port=80 --target-port=80 -n dev

# check
kubectl get svc -n dev
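
A ClusterIP Service is only reachable from inside the cluster; to verify it, curl the CLUSTER-IP reported above from one of the nodes (placeholder shown, substitute the real address):

sh
# run on any cluster node; this address is not reachable from outside the cluster
curl http://<CLUSTER-IP>:80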

A Service for access from outside the cluster

sh
# create service
kubectl expose deployment nginx --name=svc-nginx2 --type=NodePort --port=80 --target-port=80 -n dev

# check: a NodePort Service now appears, with a port pair such as 80:31928/TCP
kubectl get svc -n dev

curl http://192.168.109.100:31467/

# if it cannot be reached, check the node IP
kubectl describe nodes node1

Annotations:        flannel.alpha.coreos.com/backend-data: {"VtepMAC":"fe:24:70:68:3a:4d"}
                    flannel.alpha.coreos.com/backend-type: vxlan
                    flannel.alpha.coreos.com/kube-subnet-manager: true
                    flannel.alpha.coreos.com/public-ip: 192.168.0.105
                    kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true

Configuration-based approach

Create svc-nginx.yaml

yaml
apiVersion: v1
kind: Service
metadata:
    name: svc-nginx
    namespace: dev
    labels:
        version: '3.0'
        env: 'test'
spec:
    ports:
        - name: http
          port: 80
          protocol: TCP
          targetPort: 80
    selector:
        run: nginx
    type: ClusterIP
sh
kubectl create -f svc-nginx.yaml
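
To verify the Service created from the file and clean it up afterwards, following the pattern of the earlier sections:

sh
# check
kubectl get svc svc-nginx -n dev

# delete
kubectl delete -f svc-nginx.yaml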