数据存储
volume类型
- 简单存储:EmptyDir、HostPath、NFS
- 高级存储:PV、PVC
- 配置存储:ConfigMap、Secret
EmptyDir
yaml
apiVersion: v1
kind: Pod
metadata:
  name: volume-emptydir
  namespace: dev
spec:
  containers:
    # nginx writes its access log under /var/log/nginx, which is the shared volume
    - name: nginx
      image: nginx:latest
      ports:
        - containerPort: 80
      volumeMounts:
        - name: logs-volume  # must match spec.volumes[].name
          mountPath: /var/log/nginx
    # busybox tails the log file nginx produces, demonstrating the shared emptyDir
    - name: busybox
      image: busybox:latest
      command: ['/bin/sh', '-c', 'tail -f /var/log/nginx/access.log']
      volumeMounts:
        - name: logs-volume
          mountPath: /var/log/nginx
  volumes:
    # emptyDir: created empty when the Pod is scheduled, deleted with the Pod
    - name: logs-volume
      emptyDir: {}
操作
sh
# access nginx via the Pod IP (generates an access-log entry in the shared volume)
curl 20.224.2.100
# watch the log through the busybox sidecar, which tails the shared emptyDir
kubectl logs -f volume-emptydir -n dev -c busybox
HostPath
持久化存储
yaml
apiVersion: v1
kind: Pod
metadata:
  # renamed from the copy-pasted "volume-emptydir" to avoid a name
  # collision with the emptyDir example in the same namespace
  name: volume-hostpath
  namespace: dev
spec:
  containers:
    # nginx writes its access log into the shared volume
    - name: nginx
      image: nginx:latest
      ports:
        - containerPort: 80
      volumeMounts:
        - name: logs-volume  # must match spec.volumes[].name
          mountPath: /var/log/nginx
    # busybox tails the log file nginx produces
    - name: busybox
      image: busybox:latest
      command: ['/bin/sh', '-c', 'tail -f /var/log/nginx/access.log']
      volumeMounts:
        - name: logs-volume
          mountPath: /var/log/nginx
  volumes:
    # hostPath: data persists on the node at /root/logs even after the Pod is deleted
    - name: logs-volume
      hostPath:
        path: /root/logs
        type: DirectoryOrCreate  # create the directory on the node if it is missing
NFS
sh
# install the NFS server on the master; worker nodes only need the package
# installed (not started) so they have the NFS client/driver available
yum install -y nfs-utils
# create the shared directory
mkdir -pv /root/data/nfs
# expose the shared directory read-write to every host in the 192.168.109.0/24 subnet
vim /etc/exports # /root/data/nfs 192.168.109.0/24(rw,no_root_squash)
# start the NFS service
systemctl start nfs
yaml
apiVersion: v1
kind: Pod
metadata:
  # renamed from the copy-pasted "volume-emptydir" to avoid a name
  # collision with the emptyDir example in the same namespace
  name: volume-nfs
  namespace: dev
spec:
  containers:
    # nginx writes its access log into the shared volume
    - name: nginx
      image: nginx:latest
      ports:
        - containerPort: 80
      volumeMounts:
        - name: logs-volume  # must match spec.volumes[].name
          mountPath: /var/log/nginx
    # busybox tails the log file nginx produces
    - name: busybox
      image: busybox:latest
      command: ['/bin/sh', '-c', 'tail -f /var/log/nginx/access.log']
      volumeMounts:
        - name: logs-volume
          mountPath: /var/log/nginx
  volumes:
    # nfs: logs survive Pod/node loss; every node mounting this share sees the same data
    - name: logs-volume
      nfs:
        server: 192.168.109.100  # NFS server address (the master in this setup)
        path: /root/data/nfs     # exported directory from /etc/exports
PV
yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mypv  # PVs are cluster-scoped: no namespace
spec:
  # backing storage: an NFS export
  nfs:
    path: /tmp
    server: 172.17.0.2
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteMany  # mountable read-write by many nodes simultaneously
  storageClassName: nfs
  # Retain: after the claim is released, keep the volume and its data for manual reclaim
  persistentVolumeReclaimPolicy: Retain
sh
# create the backing directories for the three PVs
# (no spaces inside the braces: "{pv1, pv2, pv3}" would create literal
# directories named "{pv1,", "pv2," and "pv3}" instead of pv1/pv2/pv3)
mkdir -pv /root/data/{pv1,pv2,pv3}
# export each directory read-write to the 192.168.109.0/24 subnet
more /etc/exports
/root/data/pv1 192.168.109.0/24(rw,no_root_squash)
/root/data/pv2 192.168.109.0/24(rw,no_root_squash)
/root/data/pv3 192.168.109.0/24(rw,no_root_squash)
# start the NFS service so the exports take effect
systemctl start nfs
配置文件
yaml
# Three PVs of increasing size (1Gi / 2Gi / 3Gi), all backed by the NFS
# exports created above. Multi-document file: one PV per document.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mypv1
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  storageClassName: nfs
  persistentVolumeReclaimPolicy: Retain
  nfs:
    path: /root/data/pv1
    server: 192.168.109.100
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mypv2
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteMany
  storageClassName: nfs
  persistentVolumeReclaimPolicy: Retain
  nfs:
    path: /root/data/pv2
    server: 192.168.109.100
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mypv3
spec:
  capacity:
    storage: 3Gi
  accessModes:
    - ReadWriteMany
  storageClassName: nfs
  persistentVolumeReclaimPolicy: Retain
  nfs:
    path: /root/data/pv3
    server: 192.168.109.100
pvc
yaml
# Three claims against the PVs above. PVCs are namespaced, unlike PVs.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mypvc1
  namespace: dev
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi  # binds to mypv1 (1Gi)
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mypvc2
  namespace: dev
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 2Gi  # binds to mypv2 (2Gi)
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mypvc3
  namespace: dev
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      # 5Gi exceeds the largest PV defined above (3Gi), so this claim
      # stays Pending — demonstrates a claim that cannot be satisfied
      storage: 5Gi
操作
sh
# create the three claims defined above
kubectl create -f pvc.yaml
# list the claims: two should be Bound, mypvc3 stays Pending
kubectl get pvc -n dev
# list the PVs to see which claim each one bound to
kubectl get pv
pods.yaml
yaml
apiVersion: v1
kind: Pod
metadata:
  name: my-pod
  namespace: dev  # must be the PVC's namespace — a Pod can only use claims in its own namespace
spec:
  containers:
    - name: my-container
      image: busybox:latest
      # sleep keeps the container alive so the mounted volume can be inspected
      command: ['sh', '-c', 'echo Hello, Kubernetes! && sleep 3600']
      volumeMounts:
        - name: volume  # must match spec.volumes[].name
          mountPath: /root/
  volumes:
    # consume storage through the claim, not the PV directly
    - name: volume
      persistentVolumeClaim:
        claimName: mypvc1
        readOnly: false